Example #1
0
def load_tracksets(data, trackids=None, min_length=10, verbose=False,
        run_remove_dupes=False, run_fill_gaps=False, run_track_orient=False):
    """ Returns a dict of slices into data based on trackid
    """
    if trackids is None:
        # copy actually speeds it up by a factor of two
        trackids = data['t'].copy()
    elif not trackids.flags.owndata:
        # copy in case called as ...(data, data['t'])
        trackids = trackids.copy()
    # shift ids up by 1 so unassigned points (trackid -1) land in bin 0, then drop that bin
    lengths = np.bincount(trackids+1)[1:]
    if min_length > 1:
        lengths = lengths >= min_length
    # keep tracks that pass the length cut (or, if min_length <= 1, are simply nonempty)
    longtracks = np.where(lengths)[0]
    tracksets = {track: data[trackids==track] for track in longtracks}
    if run_remove_dupes:
        from tracks import remove_duplicates
        remove_duplicates(tracksets=tracksets, inplace=True, verbose=verbose)
    if run_fill_gaps:
        from tracks import fill_gaps
        fill_gaps(tracksets=tracksets, inplace=True, verbose=verbose)
    if run_track_orient:
        from orientation import track_orient
        for track in tracksets:
            tracksets[track]['o'] = track_orient(tracksets[track]['o'])
    return tracksets
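
A minimal usage sketch on synthetic data (the dtype and field names are assumptions chosen to mirror the other examples; numpy is assumed to be imported as np at module level, as the function itself requires):

import numpy as np
dtype = [('f', int), ('t', int), ('x', float), ('y', float), ('o', float)]
data = np.zeros(6, dtype=dtype)
data['t'] = [0, 0, 0, 1, 1, -1]           # two tracks plus one unassigned point
tracksets = load_tracksets(data, min_length=2)
for track in sorted(tracksets):
    print track, len(tracksets[track])    # prints "0 3" then "1 2"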
Example #2
0
def merge_data(datasets, savename=None, dupes=False, do_orient=False):
    """ returns (and optionally saves) new `data` array merged from list or
        tuple of individual `data` arrays or path prefixes.

        parameters
        ----------
        datasets : list of arrays, prefixes, or single prefix with wildcards
        savename : path prefix at which to save the merged data,
            saved as "<savename>_MERGED_<TRACKS|ORIENTATION>.npz"
        do_orient : True or False, whether to merge the orientation data as
            well. default is False, NOT IMPLEMENTED

        returns
        -------
        merged : always returned, the main merged data array

        if orientational data is to be merged, then a list of filenames or
        prefixes must be given instead of data objects.

        only data is returned if array objects are given.
    """
    if do_orient:
        raise ValueError, 'do_orient is not possible yet'
    if isinstance(datasets, str):
        if '*' in datasets or '?' in datasets:
            from glob import glob
            suf = '_TRACKS.npz'
            datasets = [ s[:-len(suf)] for s in glob(datasets+suf) ]
        elif datasets.endswith('.npz'):
            raise ValueError, "please give only the prefix"
        else:
            raise ValueError, "only one file given"

    if isinstance(datasets[0], str):
        if datasets[0].endswith('.npz'):
            raise ValueError, "please only give the prefix"
        print 'Merging'
        print '\n\t'.join(datasets)
        datasets = map(load_data, datasets)

    # offset track ids in each dataset so they stay unique in the merged array
    track_increment = 0
    for dataset in datasets:
        ts = quick_field_view(dataset, 't', False)
        if dupes:
            from tracks import remove_duplicates
            ts[:] = remove_duplicates(ts, dataset)
        ts[ts >= 0] += track_increment    # leave unassigned points (-1) alone
        track_increment = ts.max() + 1

    merged = np.concatenate(datasets)

    if savename:
        fulldir = os.path.abspath(os.path.dirname(savename))
        if not os.path.exists(fulldir):
            print "Creating new directory", fulldir
            os.makedirs(fulldir)
        savename += '_MERGED_TRACKS.npz'
        np.savez_compressed(savename, data=merged)
        print "saved to", savename
    return merged
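
A hedged usage sketch with in-memory arrays instead of path prefixes (the dtype is an assumption; it relies on quick_field_view returning a writable view of the 't' field, which the loop above already depends on, so the inputs are renumbered in place):

import numpy as np
dtype = [('f', int), ('t', int), ('x', float), ('y', float)]
data_a = np.zeros(3, dtype=dtype)
data_a['t'] = [0, 1, 1]
data_b = np.zeros(2, dtype=dtype)
data_b['t'] = [0, 0]
merged = merge_data([data_a, data_b])
print merged['t']    # [0 1 1 2 2] -- ids from data_b offset past data_a's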
Example #3
def convol_track_config(self, pdata, odata, startframe):
    # group the position data by frame and build a KDTree per frame for the
    # neighbor searches used during tracking
    pfsets = helpy.load_framesets(pdata)
    pftrees = {f: KDTree(helpy.consecutive_fields_view(pfset, 'xy'), leafsize=32)
               for f, pfset in pfsets.iteritems()}
    # link positions into tracks, then drop duplicate track assignments
    trackids = tracks.find_tracks(pdata, maxdist=self.max_dist, giveup=10, n=0,
                                  stub=20, pftrees=pftrees, pfsets=pfsets,
                                  startframe=startframe)
    trackids = tracks.remove_duplicates(trackids, data=pdata)
    # combine positions, track ids, and orientations into one track data array
    return_data = helpy.initialize_tdata(pdata, trackids, odata)
    return_data = helpy.add_self_view(return_data, ('x', 'y'), 'xy')
    return return_data
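
A calling sketch only, not self-contained: it assumes an instance of the enclosing (unshown) class with a max_dist attribute, position and orientation arrays pdata and odata loaded elsewhere, and KDTree (e.g. scipy.spatial's) importable at module level:

tdata = tracker.convol_track_config(pdata, odata, startframe=0)   # `tracker` is a hypothetical instance
print tdata['xy'][:5]    # the combined ('x', 'y') view added by add_self_view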
Example #4
0
def compile_noise(prefixes, vs, width=3, side=1, fps=1, cat=True,
                  do_orientation=True, do_translation=True, subtract=True,
                  minlen=10, torient=True, interp=True, dupes=False, **ignored):
    if np.isscalar(prefixes):
        prefixes = [prefixes]
    for prefix in prefixes:
        if args.verbose:
            print "Loading data for", prefix
        data = helpy.load_data(prefix, 'tracks')
        if dupes:
            data['t'] = tracks.remove_duplicates(data['t'], data)
        tracksets = helpy.load_tracksets(data, min_length=minlen,
                run_track_orient=torient, run_fill_gaps=interp)
        for track in tracksets:
            tdata = tracksets[track]
            velocities = noise_derivatives(tdata, width=width,
                    side=side, fps=fps, do_orientation=do_orientation,
                    do_translation=do_translation, subtract=subtract)
            for v in velocities:
                vs[v].append(velocities[v])
    if cat:
        for v in vs:
            vs[v] = np.concatenate(vs[v], -1)
    return len(tracksets)   # number of tracks in the last prefix processed
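
A hedged usage sketch: compile_noise appends into `vs` in place, so a defaultdict(list) works without knowing the component names in advance (they are whatever noise_derivatives returns); it also expects a module-level `args` namespace providing args.verbose. The prefixes and parameter values below are hypothetical:

from collections import defaultdict
vs = defaultdict(list)
ntracks = compile_noise(['run_a', 'run_b'], vs, width=3, side=17, fps=2.5,
                        minlen=10, torient=True, interp=True)
for v in vs:
    print v, vs[v].shape    # each component concatenated along the last axis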