Example #1
import numpy as np
import corr, helpy  # local analysis modules assumed by these examples

def vv_autocorr(vs, normalize=False):
    normalize = normalize and 1  # coerce the boolean to the int norm flag
    fields = helpy.vel_dtype.names
    # autocorrelate the velocity fields of every track in every dataset
    vvs = [corr.autocorr(helpy.consecutive_fields_view(tv, fields),
                         norm=normalize, cumulant=True)
           for pvs in vs.itervalues() for tv in pvs.itervalues()]
    # weighted average of the unequal-length correlations
    vvs, vv, dvv = helpy.avg_uneven(vvs, weight=True)
    # repack each result into the structured velocity dtype
    return [np.array(a, order='C').astype('f4').view(helpy.vel_dtype).squeeze()
            for a in (vvs, vv, dvv)]
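
These examples all lean on corr.autocorr, whose implementation is not shown here. For orientation, here is a minimal numpy sketch of what such a cumulant (mean-subtracted), optionally normalized autocorrelation might compute for a 1-d signal; the function name and details are assumptions, not the project's actual corr module:

import numpy as np

def autocorr_sketch(x, norm=True, cumulant=True):
    """Hypothetical stand-in for corr.autocorr: lag-t average <x(s)x(s+t)>."""
    x = np.asarray(x, float)
    if cumulant:
        x = x - x.mean()  # central (cumulant) form: subtract the mean first
    n = len(x)
    ac = np.correlate(x, x, mode='full')[n - 1:]  # keep lags t >= 0
    ac /= np.arange(n, 0, -1)  # average over the n - t overlapping pairs
    if norm:
        ac /= ac[0]  # normalize so the zero-lag value is 1
    return ac
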
Example #2
def vv_autocorr(vs, normalize=False):
    normalize = normalize and 1
    fields = helpy.vel_dtype.names
    vvs = [
        corr.autocorr(helpy.consecutive_fields_view(tv, fields),
                      norm=normalize,
                      cumulant=True) for pvs in vs.itervalues()
        for tv in pvs.itervalues()
    ]
    vvs, vv, dvv = helpy.avg_uneven(vvs, weight=True)
    return [
        np.array(a, order='C').astype('f4').view(helpy.vel_dtype).squeeze()
        for a in (vvs, vv, dvv)
    ]
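
Both renderings above assume the same input layout. A hypothetical call, where vs maps each dataset prefix to a dict of per-track structured velocity arrays (the nesting implied by the two itervalues() loops):

# Hypothetical usage: vs[prefix][track_id] is a structured array whose
# fields are helpy.vel_dtype.names.
vvs, vv, dvv = vv_autocorr(vs, normalize=True)
# vvs: per-track autocorrelations, vv: their weighted mean,
# dvv: its error estimate (as returned by helpy.avg_uneven)
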
Example #3
from collections import defaultdict
import numpy as np
import corr  # local analysis module assumed by these examples

def vv_autocorr(prefixes, corrlen=0.5, **compile_args):
    # compile_noise is assumed to be defined elsewhere in this module
    vs = defaultdict(list)
    compile_noise(prefixes, vs, cat=False, **compile_args)
    vvs = {}
    for v, tvs in vs.iteritems():
        # corrlen < 1 means a fraction of the longest track, else a lag count
        vcorrlen = int(corrlen*max(map(len, tvs))) if corrlen < 1 else corrlen
        vv = np.full((len(tvs), vcorrlen), np.nan, float)
        for i, tv in enumerate(tvs):
            ac = corr.autocorr(tv, norm=1, cumulant=False)[:vcorrlen]
            vv[i, :len(ac)] = ac
        # count contributing tracks per lag and drop lags with no data
        vvcount = np.isfinite(vv).sum(0)
        vv, vvcount = vv[:, vvcount > 0], vvcount[vvcount > 0]
        # take the spread across tracks before collapsing to the mean
        dvv = np.nanstd(vv, 0)/np.sqrt(vvcount)
        vv = np.nanmean(vv, 0)
        vvs[v] = vv, dvv
    return vvs
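
A sketch of consuming the returned dict, assuming matplotlib and two hypothetical dataset prefixes; with corrlen=0.5 the correlations run to half the longest track:

import numpy as np
import matplotlib.pyplot as plt

vvs = vv_autocorr(['dataset_a', 'dataset_b'], corrlen=0.5)  # hypothetical prefixes
for v, (vv, dvv) in vvs.iteritems():
    plt.errorbar(np.arange(len(vv)), vv, yerr=dvv, label=str(v))
plt.xlabel('lag (frames)')
plt.ylabel('velocity autocorrelation')
plt.legend()
plt.show()
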
Example #4
    # Calculate the <nn> correlation for all the tracks in a given dataset
    # TODO: fix this to combine multiple datasets (more than one prefix)

    if args.verbose:
        print 'calculating <nn> correlations for tracks:'
        coscorrs = []
        sincorrs = []
        for t, trackset in tracksets.iteritems():
            print t,
            o = trackset['o']
            if args.verbose > 1:
                print o.shape, o.dtype
            sys.stdout.flush()
            # correlate the components of the orientation vector (cos o, sin o)
            cos = np.cos(o)
            sin = np.sin(o)
            coscorr = corr.autocorr(cos, cumulant=False, norm=False)
            sincorr = corr.autocorr(sin, cumulant=False, norm=False)
            coscorrs.append(coscorr)
            sincorrs.append(sincorr)
    else:
        coscorrs = [corr.autocorr(np.cos(trackset['o']), cumulant=False, norm=False)
                    for trackset in tracksets.values()]
        sincorrs = [corr.autocorr(np.sin(trackset['o']), cumulant=False, norm=False)
                    for trackset in tracksets.values()]

    # Gather all the track correlations and average them
    allcorr = coscorrs + sincorrs
    allcorr = helpy.pad_uneven(allcorr, np.nan)  # pad shorter tracks with NaN
    tcorr = np.arange(allcorr.shape[1])/fps      # lag times in seconds
    meancorr = np.nanmean(allcorr, 0)
    added = np.sum(np.isfinite(allcorr), 0)      # tracks contributing per lag
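
Concatenating coscorrs and sincorrs before averaging works because <cos θ(0)cos θ(t)> + <sin θ(0)sin θ(t)> = <cos(θ(t)−θ(0))>, so the mean over the combined list is half the orientation correlation <n(0)·n(t)>. The padding step aligns tracks of unequal length; a minimal numpy sketch of what helpy.pad_uneven presumably does (its real implementation is not shown here):

import numpy as np

def pad_uneven_sketch(arrs, fill=np.nan):
    """Hypothetical stand-in for helpy.pad_uneven: stack 1-d arrays of
    different lengths into one 2-d array, padding short rows with fill."""
    width = max(len(a) for a in arrs)
    out = np.full((len(arrs), width), fill, float)
    for i, a in enumerate(arrs):
        out[i, :len(a)] = a
    return out
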
Example #5
        if args.singletracks:
            mask = np.in1d(trackids, args.singletracks)
        else:
            mask = None
        plot_tracks(data, trackids, bgimage, mask=mask,
                    save=args.save, show=args.show)

if __name__=='__main__' and args.nn:
    # Calculate the <nn> correlation for all the tracks in a given dataset
    # TODO: fix this to combine multiple datasets (more than one prefix)

    data, trackids, odata, omask = helpy.load_data(prefix, True, False)
    tracksets, otracksets = helpy.load_tracksets(data, trackids, odata, omask,
                                                 min_length=args.stub)

    coscorrs = [corr.autocorr(np.cos(otrackset), cumulant=False, norm=False)
                for otrackset in otracksets.values()]
    sincorrs = [corr.autocorr(np.sin(otrackset), cumulant=False, norm=False)
                for otrackset in otracksets.values()]

    # Gather all the track correlations and average them
    allcorr = coscorrs + sincorrs
    allcorr = helpy.pad_uneven(allcorr, np.nan)
    tcorr = np.arange(allcorr.shape[1])/fps
    meancorr = np.nanmean(allcorr, 0)
    added = np.sum(np.isfinite(allcorr), 0)
    errcorr = np.nanstd(allcorr, 0)/np.sqrt(added - 1)  # standard error of the mean
    sigma = errcorr + 1e-5*errcorr.std()  # small floor so no uncertainty is exactly zero
    if verbose:
        print "Merged nn corrs"
Example #6
                bgimage = Im.open(locdir+prefix+'_001.tif')
            except IOError:
                bgimage = None
        if singletracks:
            mask = np.in1d(trackids, singletracks)
        else:
            mask = None  # no track filter
        plot_tracks(data, trackids, bgimage, mask=mask)

if __name__=='__main__' and args.nn:
    # Calculate the <nn> correlation for all the tracks in a given dataset
    # TODO: fix this to combine multiple datasets (more than one prefix)

    data, trackids, odata, omask = helpy.load_data(prefix, True, False)
    tracksets, otracksets = helpy.load_tracksets(data, trackids, odata, omask,
                                                 min_length=args.stub)

    coscorrs = [corr.autocorr(np.cos(otrackset), cumulant=False, norm=False)
                for otrackset in otracksets.values()]
    sincorrs = [corr.autocorr(np.sin(otrackset), cumulant=False, norm=False)
                for otrackset in otracksets.values()]

    # Gather all the track correlations and average them
    allcorr = coscorrs + sincorrs
    allcorr = helpy.pad_uneven(allcorr, np.nan)
    tcorr = np.arange(allcorr.shape[1])/fps
    meancorr = np.nanmean(allcorr, 0)
    errcorr = np.nanstd(allcorr, 0)/np.sqrt(len(allcorr))
    if verbose:
        print "Merged nn corrs"

    # Fit to exponential decay
    tmax = 50
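
The fit itself is cut off in this excerpt; a minimal sketch of how it might proceed over lags up to tmax, under the same assumed form (DR is a hypothetical parameter name; errcorr is assumed finite and positive over the fit range):

from scipy.optimize import curve_fit

fit_mask = tcorr <= tmax  # restrict the fit to short lags
popt, pcov = curve_fit(lambda t, DR: 0.5*np.exp(-DR*t),
                       tcorr[fit_mask], meancorr[fit_mask],
                       p0=[1.0], sigma=errcorr[fit_mask])
DR = popt[0]  # fitted decay rate; 1/DR is the orientational persistence time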