Example #1
def preProcessData(ball, max_displacement=20, memory_val=5, minTrajectory=9):
    '''
    This function takes a DataFrame with unsorted data and links the trajectories.
    We then filter out any trajectories that are too short to be useful and return
    the updated DataFrame.

    Inputs:
        ball - DataFrame with unsorted particle coordinates
        max_displacement - max distance in pixels a spot could have moved between frames
        memory_val - number of frames a spot can be missing for
        minTrajectory - shortest useful trajectory to keep

    Outputs:
        ball - DataFrame which now includes a particle number for each unique
        trajectory, with spurious dots removed (spurious = present in fewer than
        minTrajectory frames).
    '''

    ball = tp.link_df(ball, max_displacement, memory=memory_val)

    #remove trajectories shorter than minTrajectory frames
    ball = tp.filter_stubs(ball, threshold=minTrajectory)

    #ball.drop('frame',axis=1,inplace=True)

    #print(ball.head(n=20))

    return ball
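A minimal usage sketch (the CSV name is hypothetical; the DataFrame must carry the 'x', 'y' and 'frame' columns that tp.locate/tp.batch produce):

import pandas as pd
import trackpy as tp

ball = pd.read_csv('located_spots.csv')  # hypothetical output of tp.batch
ball = preProcessData(ball, max_displacement=20, memory_val=5, minTrajectory=9)
print(ball['particle'].nunique(), 'trajectories kept')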
Example #2
    def test_t_column(self):
        f = self.features.copy()
        cols = list(f.columns)
        name = 'arbitrary name'
        cols[cols.index('frame')] = name
        f.columns = cols

        # smoke tests
        tp.link_df(f, 5, t_column=name, verify_integrity=True)
        f_iter = (frame for fnum, frame in f.groupby(name))
        list(tp.link_df_iter(f_iter, 5, t_column=name, verify_integrity=True))
Example #4
    def test_t_column(self):
        f = self.features.copy()
        cols = list(f.columns)
        name = 'arbitrary name'
        cols[cols.index('frame')] = name
        f.columns = cols

        # smoke tests
        tp.link_df(f, 5, t_column=name, verify_integrity=True)

        f_iter = (frame for fnum, frame in f.groupby('arbitrary name'))
        list(tp.link_df_iter(f_iter, 5, t_column=name, verify_integrity=True))
Example #6
def form_trajectories(loc):
	global particles,P,T,bends,track
	print()
	print('Forming worm trajectories...', end=' ')
	data = {'x':[],'y':[],'frame':[],
			'eccentricity':[],'area':[],
            'minor_axis_length':[],
            'area_eccentricity':[]}
	for t,l in enumerate(loc):
		data['x'] += [d['centroid'][0] for d in l]
		data['y'] += [d['centroid'][1] for d in l]
		data['eccentricity'] += [d['eccentricity'] for d in l]
		data['area_eccentricity'] += [d['area_eccentricity'] for d in l]
		data['minor_axis_length'] += [d['minor_axis_length'] for d in l]
		data['area'] += [d['area'] for d in l]
		data['frame'] += [t]*len(l)
	data = pd.DataFrame(data)
	try:
		track = tp.link_df(data,search_range=max_dist_move,memory=memory)
	except tp.linking.SubnetOversizeException:
		print('Linking problem too complex. Reduce maximum move distance or memory.')
		print('Stopping.')
		exit()
	track = tp.filter_stubs(track, min([min_track_length,len(loc)]))
	try:
		with open('%strack.p' % save_as, 'wb') as trackfile:
			pickle.dump(track, trackfile)
	except Exception:
		print('Warning: no track file saved. Track too long.')
		print('         plot_path.py will not work on this file.')

	return track
Example #7
def link_ts_table(
    ts_table: pd.DataFrame,
    min_frame=None,
    max_frame=None,
    exposure_ms=60,
    link_distance_um=0.3,
    link_memory=1,
    verbose=0,
    loc_num_plot=True,
):
    """
    links particles using 'x [nm]', 'y [nm]', 'frame' collumns with trackpy
    returns Dataframe with 'x', 'y', 'frame', 'particle' columns
    """
    df = pd.DataFrame(columns=["x", "y", "frame"],
                      data=ts_table[["x [nm]", "y [nm]", "frame"]].values)

    df.x = df.x / 1000  # nm -> um
    df.y = df.y / 1000  # nm -> um
    if min_frame is not None:
        df = df[df.frame >= min_frame]
    if max_frame is not None:
        df = df[df.frame <= max_frame]
    if verbose:
        print(df.head())
    print(f"Linking with max distance {link_distance_um} um")
    tracks = tp.link_df(df, search_range=link_distance_um, memory=link_memory)
    if verbose:
        print(tracks.head())

    return tracks
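For instance, with a tiny hand-made ThunderSTORM-style table (all values made up), the first two rows fall within the 0.3 um search range and get the same particle id:

import pandas as pd

ts = pd.DataFrame({
    'x [nm]': [100.0, 110.0, 4000.0],
    'y [nm]': [200.0, 205.0, 8000.0],
    'frame':  [0, 1, 0],
})
tracks = link_ts_table(ts, link_distance_um=0.3, link_memory=1)
print(tracks[['x', 'y', 'frame', 'particle']])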
Example #8
 def set_trajectories(self,
                      link=True,
                      search_range=2.,
                      verbose=True,
                      **kwargs):
     df = self.trajectories
     if df.empty:
         # collect the per-frame features (DataFrame.append was removed in pandas 2)
         df = pd.concat([frame.to_df() for frame in self.frames])
     if link:
         if not verbose:
             tp.quiet(suppress=True)
         df = df.rename(columns={
             'x_p': 'x',
             'y_p': 'y',
             'framenumber': 'frame'
         })
         df = tp.link_df(df, search_range, **kwargs)
         df = df.rename(columns={
             'x': 'x_p',
             'y': 'y_p',
             'frame': 'framenumber'
         })
     self._trajectories = df
Example #9
    def track_cell(self):
        det_df = pd.read_csv(self.config.OUTPUT_PATH + self.config.ROOT_NAME + \
            '-cellDetData.csv')

        blobs_df = tp.link_df(
            det_df,
            search_range=self.config.CELL_SEARCH_RANGE,
            memory=self.config.CELL_MEMORY,
        )
        blobs_df = tp.filter_stubs(blobs_df, 5)
        blobs_df = blobs_df.reset_index(drop=True)
        blobs_df = add_traj_length(blobs_df)

        traj_num_before = blobs_df['particle'].nunique()
        after_filter_df = blobs_df[
            blobs_df['traj_length'] >= self.config.CELL_TRAJ_LEN_THRES]
        print("######################################")
        print("Trajectory number before filters: \t%d" % traj_num_before)
        print("Trajectory number after filters: \t%d" %
              after_filter_df['particle'].nunique())
        print("######################################")

        blobs_df.round(6).to_csv(self.config.OUTPUT_PATH + self.config.ROOT_NAME + \
               '-cellPhysData.csv', index=False)

        self.config.save_config()
Example #10
def compute_trajectories(input_file_name, mem, sr, width, height, **kwargs):
    df = pd.read_csv(input_file_name)
    df.drop_duplicates(subset=['t', 'x', 'y'],
                       keep='first',
                       inplace=True,
                       ignore_index=True)
    # df.drop_duplicates(inplace=True, ignore_index=True)
    print(f"tracking spiral tips for {os.path.basename(input_file_name)}...")
    t_list = sorted(set(df.t.values))
    frameno_list = list(range(len(t_list)))

    df['frame'] = -9999
    for frameno, t in zip(frameno_list, t_list):
        df.loc[df.t == t, 'frame'] = frameno
    #assert that all entries were given a value
    assert (not (df.frame < 0).any())

    #consider all tip pairs
    # width, height = 200, 200 # txt.shape[:2]
    distance_L2_pbc = get_distance_L2_pbc(width, height)

    link_kwargs = {
        'neighbor_strategy': 'BTree',
        'adaptive_step': 0.5,
        'adaptive_stop': 1e-5,
        'dist_func': distance_L2_pbc,
        'memory': mem,
        'search_range': sr
    }

    # df['frame'] = np.around(df['t']/h)
    # df = df.astype(dtype={'frame':int}).copy()
    traj = trackpy.link_df(f=df.head(-1), t_column='frame', **link_kwargs)
    return traj
Example #11
def track_particles(df, search_range=3, memory=5, **kwargs):
    """Assemble particles - call them tracks, trajectories - from the features located in the DNA's region of interest.

    This is a wrapper for the `trackpy.link_df` function.
    See its documentation for parameters and more information.

    Parameters
    ----------
    search_range: int
        The radius of pixels around a feature in which to search for the next feature.
    memory: int
        The max number of frames between two features for them to be considered the same particle.
    kwargs:
        Any keyword argument to be passed to the trackpy ``link_df`` function.

    Returns
    -------
    particles: cats.particles.Particles
        The Particles found

    """
    ps = cats.particles.Particles(
        tp.link_df(df, search_range=search_range, memory=memory, **kwargs))

    # Setup element attributes from Features
    if hasattr(df, '_element_attributes') and 'particle' not in df.columns:
        for attr, value in df._element_attributes.items():
            ps._set_element_attribute(attr, value)
    kwargs['search_range'] = search_range
    kwargs['memory'] = memory
    ps._update_element_attribute('tracking_parameters', kwargs, ps)
    return ps
def link_df(obj,
            ParameterJsonFile,
            SearchFixedParticles=False,
            max_displacement=None,
            dark_time=None):
    """ define the parameters for the trackpy routine tp.link, which forms trajectories
    out of particle positions, out of the json file
    
    important parameters:
    SearchFixedParticles = defines whether fixed or moving particles are under current investigation
    dark_time            = settings["Link"]["Dark time"] ... maximum number of frames a particle can disappear
    max_displacement     = ["Link"]["Max displacement"]   ...maximum displacement between two frames

    """
    settings = nd.handle_data.ReadJson(ParameterJsonFile)

    dark_time = settings["Link"]["Dark time"]

    if not SearchFixedParticles:
        max_displacement = settings["Link"]["Max displacement"]
    else:
        max_displacement = settings["Link"]["Max displacement fix"]

    t1_orig = tp.link_df(obj, max_displacement, memory=dark_time)

    nd.handle_data.WriteJson(ParameterJsonFile, settings)

    return t1_orig
Example #13
def form_trajectories(loc, settings):
    """Form worm trajectories."""
    print('Forming worm trajectories...', end=' ')
    data = {'x': [], 'y': [], 'frame': [],
            'eccentricity': [], 'area': [],
            'minor_axis_length': [],
            'area_eccentricity': []}
    for t, l in enumerate(loc):
        data['x'] += [d['centroid'][0] for d in l]
        data['y'] += [d['centroid'][1] for d in l]
        data['eccentricity'] += [d['eccentricity'] for d in l]
        data['area_eccentricity'] += [d['area_eccentricity'] for d in l]
        data['minor_axis_length'] += [d['minor_axis_length'] for d in l]
        data['area'] += [d['area'] for d in l]
        data['frame'] += [t] * len(l)
    data = pd.DataFrame(data)
    try:
        track = tp.link_df(data, search_range=settings["max_dist_move"],
                           memory=settings["memory"])
    except tp.linking.SubnetOversizeException:
        raise RuntimeError(
            'Linking problem too complex.'
            ' Reduce maximum move distance or memory.')
    track = tp.filter_stubs(track, min([settings["min_track_length"],
                                        len(loc)]))
    try:
        with open(os.path.join(settings["save_as"], 'track.p'),
                  'bw') as trackfile:
            pickle.dump(track, trackfile)
    except Exception:
        traceback.print_exc()
        print('Warning: no track file saved. Track too long.')
        print('         plot_path.py will not work on this file.')

    return track
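Both versions of form_trajectories abort when linking is too complex; trackpy's adaptive search can instead shrink the search range only on the oversized subnets. A sketch of the alternative call (not part of the original code):

track = tp.link_df(data, search_range=settings["max_dist_move"],
                   memory=settings["memory"],
                   adaptive_step=0.95,  # multiply the search range by 0.95 on oversized subnets
                   adaptive_stop=0.5)   # raise only if forced to shrink below 0.5 px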
Example #14
def find_trajs():
    #find the trajectories of particles across an experiment
    partDict = get_partDict()
    time_list, x_list, y_list, z_list, a_list = [], [], [], [], []
    for time in partDict:
        param = partDict[time]
        for particle in param:
            time_list.append(time)
            x_list.append(particle[0])
            y_list.append(particle[1])
            z_list.append(particle[2])
            a_list.append(particle[3])
    df = DataFrame(time_list, columns=['t'])
    df.insert(1, 'x', x_list, True)
    df.insert(2, 'y', y_list, True)
    df.insert(3, 'z', z_list, True)
    df.insert(4, 'a', a_list, True)
    thresh = [300, 300, 300, 2]
    t = tp.link_df(df,
                   thresh,
                   memory=40,
                   pos_columns=['x', 'y', 'z', 'a'],
                   t_column='t')
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    #t1 = tp.filter(t, condition)
    print(t)
    return t
Example #15
def cleanup_track(parts,
                  traj_len_thresh=1,
                  part_per_frame=1,
                  remove_hotpixels=10):
    # make sure df belongs to single movie
    assert len(parts.mov_name.unique()) == 1
    # remove peaks outside cells
    parts_filt = parts[parts.roi > 0]
    # remove hot pixels
    if remove_hotpixels:
        parts_filt = parts_filt[parts_filt.mass < parts_filt.mass.mean() *
                                remove_hotpixels]
    # keep the two brightest spots per ROI per frame; the 'tail' method keeps the top N
    parts_filt = parts_filt.sort_values('mass').groupby(
        ['roi', 'frame']).tail(part_per_frame + 1).reset_index(drop=True)
    # link particles with search range of 5 px and memory of 1 frame
    parts_filt = tp.link_df(parts_filt, 5, memory=1)
    # compute trajectory length for each particle
    traj_len = parts_filt.groupby('particle').count().reset_index()[[
        'particle', 'x'
    ]]
    traj_len.columns = ['particle', 'traj_len']
    parts_filt = pd.merge(parts_filt, traj_len, on='particle')
    # remove trajectories too long to be true
    parts_filt = parts_filt[parts_filt.traj_len < parts_filt.traj_len.mean() *
                            10]
    # filter spurious spots by trajectory length; faster than tp.filter_stubs
    parts_filt = parts_filt[parts_filt.traj_len > traj_len_thresh]
    # keep only one spot per ROI per frame: brightest or with longest traj
    parts_filt = parts_filt.sort_values(['mass', 'traj_len']).groupby(
        ['roi', 'frame']).tail(part_per_frame).reset_index(drop=True)
    # assign unique particle id
    parts_filt['pid'] = parts_filt['mov_name'] + '_' + parts_filt[
        'particle'].apply(str) + '_' + parts_filt['frame'].apply(str)
    return parts_filt
Example #16
    def link_trajectories(self, f_index=None):

        """Implements the trackpy functions link_df and filter_stubs"""
        # Reload DataStore
        if f_index is None:
            # when processing the whole video, store in a file with the same name as the movie
            data_filename = self.data_filename
        else:
            # store temporarily
            data_filename = self.data_filename[:-5] + '_temp.hdf5'

        with dataframes.DataStore(data_filename, load=True) as data:
            if f_index is None:
                # Trackpy methods
                data.reset_index()
                data.df = trackpy.link_df(
                    data.df,
                    get_param_val(self.parameters['default']['max_frame_displacement']),
                    memory=get_param_val(self.parameters['default']['memory']))
                data.df = trackpy.filter_stubs(
                    data.df,
                    get_param_val(self.parameters['default']['min_frame_life']))
            else:
                #Adds a particle id to single temporary dataframes for convenience
                num_particles = np.shape(data.df)[0]
                pids = np.linspace(0,num_particles-1, num=num_particles).astype(int)
                data.df['particle'] = pids

            # Save DataStore
            data.save(filename=data_filename)
Example #17
    def generate_trajectories(self, subject='particles', plot=True, export=True, invert=True, memory=50):
        """

        :param subject:
        :param plot:
        :param export:
        :param invert:
        :param memory:
        :return:
        """
        fv = tp.batch(self.image_sequence, self.particlesize, minmass=self.particleminmass, invert=invert)
        t = tp.link_df(fv, 5, memory=memory)
        if subject == 'particles':
            self.particle_trajectory_list = TrajectorySequence(t)
        elif subject == 'algae':
            self.algae_trajectory_list = TrajectorySequence(t)
        else:
            raise Exception('The argument subject of the current method '
                            '(generate_trajectories) must be either particles or algae')
        if plot:
            tp.plot_traj(t, label=True)
            plt.show()
        if export:
            t.to_csv(self.path + '\\t_' + subject + '.csv', index=False, header=True)
        if subject == 'particles':
            return self.particle_trajectory_list
        if subject == 'algae':
            return self.algae_trajectory_list
Example #18
def extract_tracks_from_frames(frames):
    """Extract tracks as dataframe from movie frames.

    Parameters
    ----------
    frames: np.array
        les différentes images chargées depuis le fichier .tif
        en tant que numpy array

    Returns
    -------
    tracks: pd.DataFrame
        les déplacements des nanoparticules dans le film sous la forme
        d'un dataframe pandas. C'est trackpy qui fait la plupart du travail

    """
    Nbframe = frames.shape[0]
    print('[extract.py] Running extraction from tracks')
    print('Second Step: Localization of the nanoparticles in each frame')
    f = tp.batch(frames[:Nbframe], 7, minmass=150, preprocess=False)

    print('Third Step: Computation of the trajectories')
    tracks = tp.link_df(f, search_range=5, memory=7)
    # Problem with the choice of the memory parameter: some trajectories are misinterpreted.
    # Example of movie 411: with memory = 7, one trajectory starts on a messy stop;
    # with memory = 2, the trajectory passes over the stop in question, which is what
    # actually happens if you watch the movie. The problem (I think) is that link_df
    # does not predict the next position from the velocity. That should be possible, but how?

    return tracks
Example #19
def get_data(raw_data_path, part_select_dict):
    """
    :param raw_data_path: string
    :return:
    """
    print("Processing file " + raw_data_path)
    data_filename = os.path.splitext(os.path.basename(raw_data_path))[0]
    data = pd.read_csv(raw_data_path)
    data = tp.link_df(data,
                      analyzer.MAX_PIXELS_BW_FRAMES,
                      memory=analyzer.TRACKING_MEMORY)
    print(
        str(len(data.particle.unique())) + " initial trajectories in " +
        raw_data_path)
    data = tp.filter_stubs(data, analyzer.MIN_TRACK_LENGTH)
    print(
        str(len(data.particle.unique())) + " trajectories span at least " +
        str(analyzer.MIN_TRACK_LENGTH) + " frames")
    data = analyzer.filter_particles_and_add_actual_size(
        data, data_filename, part_select_dict)
    print(str(len(data.particle.unique())) + " selected particles left")
    # drift = tp.compute_drift(data)
    # data = tp.subtract_drift(data, drift)
    # Plotting trajectories after drift cancelling
    # plt.figure().suptitle("Sample particle trajectories with drift")
    # tp.plot_traj(data)
    data = analyzer.cancel_avg_velocity_drift(data)
    # Plotting trajectories after drift cancelling
    # plt.figure().suptitle("Sample particle trajectories without drift")
    # tp.plot_traj(data)
    data = add_environment_variables(data, raw_data_path)
    return data
Example #20
def example_with_trackpy_and_twv(filename):
    """
    Example usecase from input file to particle and trap positions in .dat file.
    """
    frames = pims.open(filename)
    # Open file with pims. Works with many file extensions.
    # This example assumes .twv file.

    # metadata = frames.get_all_metadata()
    # Optional access to additional metadata.

    times, laser_powers, traps = frames.get_all_tweezer_positions()
    # Obtain frame times, laser power at each frame time and
    # traps powers and positions at each frame.

    features = tp.batch(frames, 25, minmass=1000, invert=False)
    # Obtain features (particle positions) using trackpy's batch function.
    # It is verbose.
    # The 25 in the arguments is the diameter. It must be an odd number.
    # It is recommended to obtain parameters using GUI.

    tracks = tp.link_df(features, 15, memory=10)
    # Joins particles positions to tracks (connects them in time).
    # See trackpy documentation for parameters.

    save_tracked_data_pandas(filename[:-4] + '_out.dat', frames, tracks, times,
                             laser_powers, traps)
Example #21
def compute_track_tips_pbc(df,
                           mem,
                           sr,
                           width,
                           height,
                           adaptive_step=0.5,
                           adaptive_stop=1e-5,
                           **kwargs):
    '''Returns a dataframe of trajectories resulting from the positions
    listed in the .csv input_file_name, using periodic boundary conditions (pbc).
    sr is the search range, which needs to be bigger than sqrt(max(width,height))
    to work with periodic boundary conditions.'''
    # distance_L2_pbc = get_distance_L2_pbc(width,height)
    # df = pd.read_csv(input_file_name)
    #assign each time a unique frame number
    t_list = sorted(set(df.t.values))
    frameno_list = list(range(len(t_list)))
    df['frame'] = -9999
    for frameno, t in zip(frameno_list, t_list):
        df.loc[df.t == t, 'frame'] = frameno
    #assert that all entries were given a value
    assert (not (df.frame < 0).any())
    distance_L2_pbc = get_distance_L2_pbc(width, height)

    link_kwargs = {
        'neighbor_strategy': 'BTree',
        'adaptive_step': adaptive_step,
        'adaptive_stop': adaptive_stop,
        'dist_func': distance_L2_pbc,
        'memory': mem,
        'search_range': sr
    }

    traj = trackpy.link_df(f=df.head(-1), t_column='frame', **link_kwargs)
    return traj
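get_distance_L2_pbc itself is not shown in these examples; here is a minimal sketch consistent with how it is called (the ['y', 'x'] coordinate order matches trackpy's default pos_columns and is an assumption):

import numpy as np

def get_distance_L2_pbc(width, height):
    dims = np.array([height, width])  # assumed ['y', 'x'] ordering
    def dist(a, b):
        d = np.abs(np.asarray(a, dtype=float) - np.asarray(b, dtype=float))
        d = np.minimum(d, dims - d)  # wrap displacements around the periodic box
        return np.sqrt((d * d).sum())
    return dist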
Example #22
 def setUpClass(cls):
     super(TestReproducibility, cls).setUpClass()
     # generate a new file
     video = pims.ImageSequence(
         os.path.join(path, 'video', 'image_sequence'))
     actual = tp.batch(invert_image(video), diameter=9, minmass=240)
     actual = tp.link_df(actual, search_range=5, memory=2)
     actual.to_csv(reproduce_fn)
Example #23
def linkTrajectories(circles_tp, removeDrift=False):

    trajectories = tp.link_df(circles_tp, 5, memory=10)
    if removeDrift:
        drift = tp.compute_drift(trajectories)
        trajectories = tp.subtract_drift(trajectories.copy(), drift)

    return trajectories
    """
def trackpy_gaussian_rot_motion_linker(data_frame, search_range, rot_velocity=0.0, fwhm=6, memory=0, **kwargs):
    '''A wrapper for trackpy linking that includes a predictor for rotating particles.

    :param data_frame: DataFrame containing all the particle position information
    :param search_range: max distance a particle can move between frames
    :param rot_velocity: the bias (in degrees) at which a candidate particle should be
    found in each frame. This value reflects the maximum bias applied to the
    particle based on its position.
    :param fwhm: the full-width-half-maximum of the gaussian function which applies
    the rotational bias. Should be set to the full-width-half-maximum of the ring
    trap in r.
    :param memory: the number of frames a particle can disappear for and still be
    considered the same particle.
    :param kwargs: additional keyword arguments passed to trackpy.link_df
    '''

    # Find the particle locations in polar coords
    xf, yf, rf = cf.least_sq_fit_circle(data_frame)
    cf.polar_coor_data_frame(data_frame, xf, yf)
    
    # Setup the gaussian profile of the bias using the full width half
    # max and the radius for the circle fit.
    std = fwhm/2.3548
    mean = rf
    gauss = lambda x: np.exp(-(x - mean)**2/(2 * std**2))
    
    # Generate the predictor function
    @trackpy.predict.predictor
    def predict(t1, particle):
        theta = cf.calc_angle(particle.pos[0], particle.pos[1], xf, yf)
        r = cf.calc_radius(particle.pos[0], particle.pos[1], xf, yf)
        
        new_theta = theta + rot_velocity * gauss(r) * (t1 - particle.t)
        new_theta %= 360.0
        new_x = cf.calc_x_from_polar(r, new_theta, xf)
        new_y = cf.calc_y_from_polar(r, new_theta, yf)
        return np.array((new_x,new_y))
        
    
    # Track the data and restructure the resulting DataFrame
    trackpy.link_df(data_frame, search_range, memory=memory, pos_columns=['x pos', 'y pos'],
                    retain_index=True, link_strategy='numba', predictor=predict, **kwargs)
    data_frame['track id'] = data_frame['particle']
    del data_frame['particle']
Example #25
def track_tips_in_folder(nb_dir, 
	h, mem, search_range, width, height, log_dir=None, out_dir=None,  **kwargs):
	'''nb_dir is the notebook directory containing the folder, Data;
	nb_dir is unused if log_dir and out_dir are both not None.
	string log_dir = folder containing the tip logs
	string out_dir = folder to write the trajectory files to
	'''
	if log_dir is None:
		log_dir = f"{nb_dir}/Data/ds_5_param_set_8/Log"
	if out_dir is None:	
		out_dir = f"{nb_dir}/Data/ds_5_param_set_8/trajectories"
	
	distance_L2_pbc = get_distance_L2_pbc(width=width, height=height)
	link_kwargs = {
	    'neighbor_strategy' : 'BTree',
	    'dist_func'         : distance_L2_pbc,
	    'memory': mem}

	#compute all _processed.csv tip logs in the Log folder
	for root, dirs, files in os.walk(".", topdown=False):
	    for name in dirs:
	        print(os.path.join(root, name))
	    for name in files:
	        os.chdir(log_dir)
	        df_dir = os.path.join(root, name)
	        if df_dir.find('_processed.csv') !=-1:
	            print(f"starting on {df_dir}...")
	            df = pd.read_csv(df_dir)
	            df['frame'] = df['t']/h
	            df = df.astype(dtype={'frame':int}).copy()
	            # test whether data has no odd spiral tips since the data has periodic boundary conditions
	            if (np.array(list(set(df.n.values)))%2==1).any():
	                print(f'WARNING: an odd number of spiral tips exists in \n\t{df_dir}')
	            
	            # compute trajectories (slowest part)
	            traj = trackpy.link_df(f=df,search_range=search_range,t_column='frame', **link_kwargs)
	            
	            #save results
	            os.chdir(out_dir)
	            save_fn = os.path.basename(df_dir).replace('_processed.csv', f'_traj_sr_{search_range}_mem_{mem}.csv')
	            traj.to_csv(save_fn, index=False)
	return True

####################################################
# TODO: Command line prompt
####################################################

#TODO: make a command line interface for process_tip_log
# def main():
#   save_dir = data_dir.replace('tip_log','tip_positions')

# if __name__ == '__main__':
#   main()
Example #26
def link(tracks: pd.DataFrame,
         filter_stubs: int = 0,
         **kwargs) -> pd.DataFrame:
    df = trackpy.link_df(tracks, **kwargs)

    if filter_stubs != 0:
        f = trackpy.filter_stubs(df, filter_stubs)
    else:
        f = df

    return f
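Because every linking option travels through **kwargs, search_range must be passed by keyword; a usage sketch (the frames variable is assumed to be loaded elsewhere, e.g. via pims):

features = trackpy.batch(frames, diameter=9, minmass=100)
trajs = link(features, filter_stubs=5, search_range=5, memory=2)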
Example #27
def add_linking_data_to_csv(datafile):
    """
    Adds a 'particle' column to tracking data CSV according to global parameters.
    Actually replaces the existing CSV with another one which includes particle linking.
    Does nothing if datafile does not have a .csv extension.
    :param datafile: A path to the file to be processed.
    """
    if datafile.endswith('.csv'):
        data = pd.read_csv(datafile)
        data = tp.link_df(data, MAX_PIXELS_BW_FRAMES, memory=TRACKING_MEMORY)
        data.to_csv(datafile, index=False)
Example #28
    def _link_trajectories(self):
        """Implements the trackpy functions link_df and filter_stubs"""
        # Trackpy methods
        self.data.particle_data = trackpy.link_df(
            self.data.particle_data,
            self.parameters['max frame displacement'],
            memory=self.parameters['memory'])
        self.data.particle_data = trackpy.filter_stubs(
            self.data.particle_data, self.parameters['min frame life'])

        # Save DataStore
        self.data.save()
Example #29
    def _link_trajectories(self):
        """Implements the trackpy functions link_df and filter_stubs"""
        # Reload DataStore
        with dataframes.DataStore(self.data_filename) as data:
            # Trackpy methods
            data.reset_index()
            data.df = trackpy.link_df(
                data.df,
                self.parameters['max frame displacement'],
                memory=self.parameters['memory'])

            data.df = trackpy.filter_stubs(data.df,
                                           self.parameters['min frame life'])
            data.set_frame_index()
Example #30
def tracking(video):
    print "running tracking"
    pimsFrames = pims.Video(video, as_grey=True)
    cells = []
    track = []
    for frame in pimsFrames[:]:
        f = tp.locate(frame, 301, invert=False, minmass=2000)
        t = tp.link_df(f, 5)  #remember cells after they left frame
        tp.annotate(f, frame)
        cells += f
        track += t
        print(t.head())
    tp.plot_traj(t)
    return t.head()
Example #32
def trackpy_rot_motion_linker(data_frame, search_range, rot_velocity=0.0, memory=0, theta_lim_bias=[0,360], **kwargs):
    '''A wrapper for trackpy linking that includes a predictor for rotating particles.

    :param data_frame: DataFrame containing all the particle position information
    :param search_range: max distance a particle can move between frames
    :param rot_velocity: the bias (in degrees) at which a candidate particle should be
    found. This is positive for positive L's.
    :param memory: the number of frames a particle can disappear for and still be
    considered the same particle.
    :param theta_lim_bias: the limits in degrees theta within which to apply the
    rotational bias. If a particle is outside this range then no bias is applied.
    :param kwargs: additional keyword arguments passed to trackpy.link_df
    '''

    # Find the particle locations in polar coords
    xf, yf, rf = least_sq_fit_circle(data_frame)
    polar_coor_data_frame(data_frame, xf, yf)

    # Generate the predictor function
    @trackpy.predict.predictor
    def predict(t1, particle):
        theta = calc_angle(particle.pos[0], particle.pos[1], xf, yf)
        r = calc_radius(particle.pos[0], particle.pos[1], xf, yf)
        if theta_lim_bias[0] < theta < theta_lim_bias[1]:
            new_theta = theta + rot_velocity * (t1 - particle.t)
            new_theta %= 360.0
            new_x = calc_x_from_polar(r, new_theta, xf)
            new_y = calc_y_from_polar(r, new_theta, yf)
            return np.array((new_x,new_y))
        else:
            return np.array((particle.pos[0], particle.pos[1]))
    
    # Track the data and restructure the resulting DataFrame
    trackpy.link_df(data_frame, search_range, memory=memory, pos_columns=['x pos', 'y pos'],
                    retain_index=True, link_strategy='numba', predictor=predict, **kwargs)
    data_frame['track id'] = data_frame['particle']
    del data_frame['particle']
Example #33
    def particle_tracking(self, search_range, length_cutoff, **kwargs):
        '''Tracking method. One must run particle detection first before this. 
        
        search_range : the max distance that two particles will be joined in one track
        length_cutoff: the min length (in frames) that a track must have
        **kwargs     : other parameters passed to trackpy.link_df method

        returns: pandas.DataFrame containing the tracks
        '''
        #must use a dataframe containing true dots only
        particles = pd.concat([item[item.True_particle] for item in self.particle_dfs])
        tracks    = tp.link_df(particles, search_range=search_range, **kwargs)
        if length_cutoff > 0:
            tracks = tp.filter_stubs(tracks, length_cutoff)
        self.tracks = tracks
        return tracks
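A hypothetical call, assuming detection has already filled self.particle_dfs (each DataFrame carrying a boolean True_particle column, as the method requires):

tracks = detector.particle_tracking(search_range=3, length_cutoff=5, memory=2)
print(tracks.groupby('particle').size().describe())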
Example #34
 def test_everything(self):
     """
     End to end test.
     Test the whole tracking pipeline from input file to particle and trap
     positions in the output file.
     """
     filename = "../../examples/data/test_example.twv"
     frames = pims.open(filename)
     times, laser_powers, traps = frames.get_all_tweezer_positions()
     features = tp.batch(frames, 25, minmass=1000, invert=False)
     tracks = tp.link_df(features, 15, memory=10)
     save_tracked_data_pandas(filename[:-4] + '_out.dat', frames, tracks, times, laser_powers, traps)
     with open(filename[:-4] + '_out.dat', 'r') as calculated_file:
         with open(filename[:-4] + '_expected.dat', 'r') as expected_file:
             for calculated, expected in zip(calculated_file, expected_file):
                 self.assertEqual(calculated, expected)
Example #35
def compute_traj(filename):

    vid = pims.Video('../test_video/' + filename)
    frames = as_grey(vid)

    midpoint = len(frames) / 2
    start = int(midpoint - 60)
    stop = int(midpoint + 60)

    f = tp.batch(frames[start:stop],
                 11,
                 invert=False,
                 minmass=160,
                 maxsize=3.0,
                 engine="numba")

    t = tp.link_df(f, 5, memory=3)

    t1 = tp.filter_stubs(t, 60)
    # Compare the number of particles in the unfiltered and filtered data.
    print('Before:', t['particle'].nunique())
    print('After:', t1['particle'].nunique())

    data = []
    for item in set(t1.particle):
        sub = t1[t1.particle == item]
        dvx = np.diff(sub.x)
        dvy = np.diff(sub.y)
        for x, y, dx, dy, frame, mass, size, ecc, signal, raw_mass, ep in \
        zip(sub.x[:-1], sub.y[:-1], dvx, dvy, sub.frame[:-1], sub.mass[:-1], sub['size'][:-1], sub.ecc[:-1], sub.signal[:-1], sub.raw_mass[:-1], sub.ep[:-1]):
            data.append({
                'dx': dx,
                'dy': dy,
                'x': x,
                'y': y,
                'frame': frame,
                'particle': item,
                'size': size,
                'ecc': ecc,
                'signal': signal,
                'mass': mass,
                'raw_mass': raw_mass,
                'ep': ep
            })
    df = pd.DataFrame(data)
    df.to_csv('../csvs/extract.csv')
Example #36
def track_tips(df_tips,
	h, search_range, mem, width, height, dist_mode='pbc', **kwargs):
	'''Using periodic boundary conditions, take the output of process_tip_log() and return a dataframe of tip trajectories.'''
	distance_L2_pbc = get_distance_L2_pbc(width=width, height=height)
	df = df_tips.copy()
	df['frame'] = (df['t'] / h).astype(int)
	if dist_mode=='pbc':
		link_kwargs = {
		    'neighbor_strategy' : 'BTree',
		    'dist_func'         : distance_L2_pbc,
	    	'memory': mem}
	else:
		link_kwargs = {
		    'neighbor_strategy' : 'BTree',
		    'dist_func'         : None,
		    'memory': mem}
	df_trajectories = trackpy.link_df(f=df,search_range=search_range,t_column='frame', **link_kwargs)
	return df_trajectories
Example #37
def get_data(outdir):
    """ Loads the output of the preprocessing steps for feature extraction
        Returns the formatted data
    """
    frames = pims.ImageSequence("../"+outdir+"/*tif")
    print(frames)

    # particle diameter
    diam = 11
    features = tp.batch(frames[:frames._count], diameter=diam, minmass=1, invert=True)
    # Link features in time: sigma_(max)
    search_range = diam-2
    # r, g, b images are loaded
    lframes = int(np.floor(frames._count/3))
    # default max 15% frame count
    imax = int(np.floor(15*lframes/100))
    t = tp.link_df(features, search_range, memory=imax)
    # default neighbour strategy: KDTree

    # Filter spurious trajectories
    # default min 10% frame count
    imin = int(np.floor(10*lframes/100))
    # if seen in imin
    t1 = tp.filter_stubs(t, imin)

    # Compare the number of particles in the unfiltered and filtered data.
    print("Unique number of particles (Before filtering):", t["particle"].nunique())
    print("(After):", t1["particle"].nunique())

    # export pandas data frame with filename being current date and time
    timestr = time.strftime("%Y%m%d-%H%M%S")
    data = pd.DataFrame({"x": t1.x, "y": t1.y, "z": t1.frame, "mass": t1.mass,
                         "size": t1["size"],  # attribute access would return DataFrame.size, not the column
                         "ecc": t1.ecc, "signal": t1.signal, "ep": t1.ep,
                         "particle": t1.particle})

    file_name = "../features_" + timestr + ".csv"
    print("Exporting %s" % (file_name))
    data.to_csv(file_name, sep="\t", encoding="utf-8")
    return data
Example #38
def process_segmented_stack(cells_analysis, output_dir, image_stack_in, segmented_stack_in, resolution, segmented_rna_points = None, search_distance = None, maximum_memory = None, track_length = None):
  """
    Track the segmented cells. Generate a floder for every tracked cell where
    the masked and cropped cell frames will reside. Only cells that appear
    in 98% frames will be considered. For these cells, the missing frames
    be filled with the segmentation information of the nearest identified
    frame. 


    Arguments:
      cells_analysis: A csv file generated from imageJ containing metadata 
        about the segmented paticles. Columns must include the series 'X', 'Y',
        "Slice", "BX", "BY", "Width", and "Height"

      output_dir: The directory that will contain the cropped segmented cell.
        Every cell will have its own directory with the name /output_dir/cell_<id>

      image_stack_in: a prefix to the image files 
      
      segmented_stack_in: a prefix to to the segmentation files. All values greater than one are foreground.

      resolution: A tuple specifiying the image resolution in X and Y dimensions 
        If not passed the resolution will be infered from the image_stack metadata.

      segmented_rna_points: A csv file that contains the information about the
        segmented RNA transcription points. The format should be x, y, intensity,
        frame.  This script will collect the points that falls within a given
        cell bounding box (per frame) and saves a csv file with these points in in
        the corresponding folder of the cell. The points will saved in image
        indexes.

  """

  #some constants

  # The extension ratio for the cropped cells
  extension_ratio = 1.10
  if maximum_memory is None:
    maximum_memory = 3

  if search_distance is None:
    search_distance = 25

  #Verify the inputs: 
  check_file(cells_analysis)

  #Get information about the segmented cells
  tracks_info = pandas.read_csv(cells_analysis, index_col=0)
  tracks_info.rename(columns={'X': 'x', 'Y': 'y', 'Slice' : 'frame'}, inplace=True)

  minimum_frame = int(tracks_info.frame.min()) 
  maximum_frame = int(tracks_info.frame.max())

  image_files = glob.glob(image_stack_in + "*")
  image_files.sort()
  if len(image_files)  < 1:
    raise Exception("Can not find any file that matches the prefix:{0}".format(image_stack_in))
  first_file = image_files[0]
  last_file = image_files[-1]

  
  m_first = re.match(r"{0}(?P<id_extension>.*)".format(image_stack_in), first_file)
  first_id, extension = os.path.splitext(m_first.group('id_extension'))


  m_last = re.match(r"{0}(?P<id_extension>.*)".format(image_stack_in), last_file)
  last_id, extension = os.path.splitext(m_last.group('id_extension'))

  if len(first_id) != len(last_id):
    raise Exception("The number of digits representing the id should match. Got {0} and {1}".format(first_file, last_file))

  ndigits = len(first_id) 

  first_frame = int(first_id)
  last_frame = int(last_id)
  n_frames = last_frame - first_frame + 1


  #Make sure all the image files and the segmentation files between first_id and last_id exists

  for frame_id in range (first_frame, last_frame + 1):
    image_file = image_stack_in + str(frame_id).zfill(ndigits) + extension
    mask_file = segmented_stack_in + str(frame_id).zfill(ndigits) + extension
    check_file(image_file, file_list = image_files)
    try:
      check_file(mask_file)
    except Exception as e:
      raise Exception("The image file {0} does not have a matching mask file {1}. Exception:{2} ".format(image_file, mask_file, e))


  if minimum_frame < first_frame:  
    raise Exception("The analysis file contains a frame {0} that has an ID less than the first input id:{1}".format(minimum_frame, first_frame))

  if maximum_frame > first_frame + n_frames - 1:  
    raise Exception("The analysis file contains a frame {0} that has an ID greater than the last input id:{1}".format(maximum_frame, first_frame+n_frames))

  if segmented_rna_points is not None:
    check_file(segmented_rna_points)

  #percentage of frames where the tracked cell should exist to be processed


  if track_length is None:
    in_percentage = 0.98
    minimum_frames_per_cell = int(math.ceil(in_percentage * n_frames))
  else:
    if not isinstance(track_length, int):
      raise Exception("The minimum track length should be a positive integer. Got {0}".format(track_length))
    elif track_length < 1:
      raise Exception("The minimum track length should be a positive integer. Got {0}".format(track_length))
    else:
      minimum_frames_per_cell = track_length

  print("Minimum number of frames where the cell should exist is {0}".format(minimum_frames_per_cell))
  if minimum_frames_per_cell > n_frames:
    raise Exception("ERROR: The minimum number of frames:{0} is greater than the number of frames:{1}."\
      .format(minimum_frames_per_cell, n_frames))

  #Make sure the image resolution is passed
  x_voxel_size = float(resolution[0])
  y_voxel_size = float(resolution[1])

  print "Image resolution is X:{0}, Y:{1}".format(x_voxel_size, y_voxel_size)
  # Create the output directory
  if not os.path.exists(output_dir):
    os.mkdir(output_dir) 

  #Parse the RNA file and rename its columns.
  RNAs = None
  if segmented_rna_points is not None:
    RNAs = pandas.read_csv(segmented_rna_points)
    RNAs.rename(columns={'x':'org_x', 'y':'org_y', 'intensity':'rna_intensity'}, inplace = True)
    RNAs.frame = RNAs.frame.apply(int)


  #Make the search range proportional to the resolution 
  physical_search_range = search_distance * math.sqrt(x_voxel_size ** 2 + y_voxel_size ** 2)
  tracks = trackpy.link_df(tracks_info, search_range =  physical_search_range, memory = maximum_memory)
  cell_ids = tracks.particle.unique()
 
  #Pick the best tracked cells.
  sizes=[]
  good_ids = []
  for x in np.nditer(cell_ids):
      n_frames_for_cell = tracks[tracks.particle == int(x)].shape[0]
      sizes.append(n_frames_for_cell)
      if n_frames_for_cell > minimum_frames_per_cell:
          good_ids.append(int(x))
      
  print "Found {0} cells to process".format(len(good_ids))

  good_ids = [int(i) for i in good_ids] 

  #Loop over all the cells
  iteration = 1
  n_cells = len(good_ids)



  for cell_id in good_ids:

    print "processing cell {0} out of {1}, cell-id:{2}".format(iteration, n_cells, cell_id) 
    #Create the cell directory 
    cell_dir = os.path.join(output_dir, "cell-{0}".format(cell_id))
    if not os.path.exists(cell_dir):
      os.mkdir(cell_dir)
    else:    
      iteration += 1
      continue

    # To get the bounding box will be in image spacing between (BX, BY) and (BY + Width, BY + Height) 
    # Get this information for cell 1
    cell_info = tracks[tracks.particle == cell_id].loc[:,['x','y', 'frame', 'particle', 'BX', 'BY', 'Width', 'Height']]
    
    #LX and LY are the right lower borders of the segmented cell. 
    cell_info['LX'] = cell_info.BX + cell_info.Width
    cell_info['LY'] = cell_info.BY + cell_info.Height

    # Convert the physical indexes to pixel coordinates for cropping
    cell_info['Left'] = cell_info.BX  / x_voxel_size
    cell_info.Left = cell_info.Left.apply(math.floor).apply(int)
    cell_info['Upper'] = cell_info.BY  / y_voxel_size
    cell_info.Upper = cell_info.Upper.apply(math.floor).apply(int)
    cell_info['Right'] = cell_info.LX  / x_voxel_size
    cell_info.Right = cell_info.Right.apply(math.ceil).apply(int)
    cell_info['Lower'] = cell_info.LY  / y_voxel_size
    cell_info.Lower = cell_info.Lower.apply(math.ceil).apply(int)

    #Create a csv file to put the bounding box corresponding to every frame from the original image. 
    frames_info_file = os.path.join(cell_dir,"frames-info.csv")
    cell_info.to_csv(frames_info_file, index = False)

    new_max_width = int(math.ceil(cell_info.Width.max() / x_voxel_size) * extension_ratio)
    new_max_height = int(math.ceil(cell_info.Height.max() / y_voxel_size) * extension_ratio)

    #Initialize the per cell RNA Dataframe
    if RNAs is not  None:
      new_x = 'x'
      new_y = 'y'
      rna_spots_info = pandas.DataFrame(columns=[new_x, new_y, 'frame', 'org_x', 'org_y', 'rna_intensity'])  
      #print "Before analyzing the cell, number of spots info = {0}".format(len(rna_spots_info.index))

    #Loop over all the frames
    #The first frame in the analysis file start with 1. However, this might not be the case for  
    # images


    for frame_id in range(first_frame,n_frames + first_frame):

      print "processing frame_id: {0}".format(frame_id)

      #Get the frame if it is part of the track, otherwise get the information
      # from the closest frame. 

      frame_info_df =  cell_info[cell_info.frame == frame_id]
      if len(frame_info_df.index) > 0:
        frame_info = cell_info[cell_info.frame == frame_id].iloc[0]
      else: 
        closest_row_id = (cell_info.frame - frame_id).apply(abs).idxmin()
        frame_info = cell_info.loc[closest_row_id]

        print("Warning: cell:{0} frame_id:{1} not found, using frame_id:{2} instead."\
          .format(cell_id, frame_id, int(frame_info.frame)))

      try: 
        left =  int(frame_info.Left)
        right = int(frame_info.Right)
        upper = int(frame_info.Upper)
        lower = int(frame_info.Lower)
        bounding_box = (int(frame_info.Left), int(frame_info.Upper), \
          int(frame_info.Right), int(frame_info.Lower))
      except Exception:
        print("Can not calculate the bounding box for frame_id=", frame_id)
        raise
      
      #Get the RNA points that fall within the bounding box for this frame    
      if RNAs is not None:

        #Get all the RNA points that satisfy: frame = frame_id, and
        #frame_info.Left < X < frame_info.Right,and
        #frame_info.Upper < Y < frame_info.Lower 

        frame_filter = RNAs[(RNAs.frame == frame_id)]
        x_filter = frame_filter[(frame_filter.org_x > left) & ( frame_filter.org_x < right)]
        frame_cell_points = x_filter[(x_filter.org_y > upper) & (x_filter.org_y < lower)] 

        if len(frame_cell_points.index) != 0:
          #Calculate the new index for the RNA transcription points using the formula x_new = (x_old - frame_info.x) / x_voxel_size + new_max_width / 2.0
          #Note that everything is in image dimensions at this point
          print("found RNA spots")

          x_df = (frame_cell_points.loc[:,'org_x'] - frame_info.x / x_voxel_size + new_max_width / 2.0).apply(int)
          frame_cell_points.loc[:,new_x] = x_df 
          frame_cell_points.loc[:,new_y] = \
            (frame_cell_points.loc[:,'org_y'] - frame_info.y / y_voxel_size + new_max_height / 2.0).apply(int)

          rna_spots_info = pandas.concat([rna_spots_info, frame_cell_points])   

      if 1:
        # Move the image object to the frame_id.
        image_array = skimage.io.imread(image_stack_in + str(frame_id).zfill(ndigits) + extension)

  
        #Move the mask to the closest frame id (which can be the frame_id)
        mask_array = skimage.io.imread(segmented_stack_in + str(int(frame_info.frame)).zfill(ndigits) + extension)
  
        # Crop the bounding box from the image and the mask 
        cropped_image = image_array[upper:lower,left:right]
        cropped_mask = mask_array[upper:lower,left:right]

        #Make sure cropped_mask includes one area only
        all_labels, num= measure.label(cropped_mask, return_num = True)

        #print "Warning, found:{0} regions in the mask corresponding to frame:{1}. Filtering out the small regions"\
        #  .format(num,frame_id)
        properties = measure.regionprops(all_labels) 
        details = [(p.area, p.label) for p in properties]
        largest_region_tuple = max(details, key = lambda t: t[0]) 
        largest_region_id = largest_region_tuple[1]
        #pdb.set_trace()
        cropped_image[all_labels != largest_region_id] = 0

  
        #Create the masked image
        #mask_ids = cropped_mask < 1
        #cropped_image[mask_ids] = 0

        #Extend the image
        expanded_image = np.zeros((new_max_height,new_max_width), dtype=cropped_image.dtype)

        #Paste the cropped image in the extended image]
        height_offset = (new_max_height - cropped_image.shape[0]) // 2
        width_offset = (new_max_width - cropped_image.shape[1]) // 2
        expanded_image[height_offset:height_offset+cropped_image.shape[0],\
          width_offset:width_offset+cropped_image.shape[1]]  = cropped_image

        output_file_name = os.path.join(cell_dir,"fragment-{0}.tif".format(str(frame_id).zfill(ndigits)))
        skimage.external.tifffile.imsave(output_file_name, expanded_image)
   
    #Write the csv file for the RNAs spots 
    if RNAs is not None: 
      rna_info_file = os.path.join(cell_dir,"rna-info.csv")
      rna_spots_info.to_csv(rna_info_file, index = False, columns = ['frame', new_x, new_y, 'org_x', 'org_y', 'rna_intensity'] )
      print "Found {0} RNA spots".format(len(rna_spots_info.index))
  
    iteration = iteration + 1
Example #39
            continue
        if region.major_axis_length < 50 or region.major_axis_length > 90:
            continue
        if region.minor_axis_length < 4 or region.minor_axis_length > 7:
            continue
        feature = feature + 1
        
#        minr, minc, maxr, maxc = region.bbox
#        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
#                              fill=False, edgecolor='red', linewidth=1)
        elli = mpatches.Ellipse([region.centroid[1],region.centroid[0]],region.major_axis_length,
                                region.minor_axis_length,-region.orientation/6.28*360,fill=False,
                                edgecolor='red',linewidth=2)
#        ax.add_patch(rect)
#        plt.draw()
        ax.add_patch(elli)
        plt.draw()
        features = pd.concat([features, pd.DataFrame([{'y':region.centroid[0],
                        'x':region.centroid[1],
                        'theta':-region.orientation,
                        'frame':num,}])])
    print('%d features found in frame %d' % (feature,num))
features.to_csv(os.path.join(datapath,'initial_tracking.dat'))
ti2 = time.time()

t = tp.link_df(features,40,memory=5) # can disappear at most 5 frames
t1 = tp.filter_stubs(t, 30) # appear in at least 30 frames
t1.to_csv(os.path.join(datapath,'trajectory.dat'))
print('###########################################')
print('Tracking finished: %f seconds for %d frames' % ((ti2 - ti1),nframe))
Example #40
def frames2coords(frames,out_fh,
                  params_locate,params_msd,params_link_df={'search_range':20,},
                  mass_cutoff=0.5,size_cutoff=0.5,ecc_cutoff=0.5,
                    filter_stubs=True,flt_mass_size=True,flt_incomplete_trjs=True,
                    force=False,test=False):
    dns=['f_batch','t','t1','t2']
    dn2dp={dn:f'{out_fh}.{dn}.tsv' for dn in dns}
    dn2df={}
    if not exists(dn2dp['t2']) or force:
        if not exists(dn2dp['t']) or force:
            dn2df['f_batch']=tp.batch(frames,engine='numba',**params_locate)
            dn2df['t']=tp.link_df(dn2df['f_batch'], **params_link_df)
            print(params_link_df)
            dn2df['f_batch'].to_csv(dn2dp['f_batch'],sep='\t')
            dn2df['t'].to_csv(dn2dp['t'],sep='\t')
        else:
            dn2df['t']=pd.read_csv(dn2dp['t'],sep='\t')
        max_lagtime_stubs=params_msd["max_lagtime"]*params_msd["fps"]
        if filter_stubs:
            dn2df['t1'] = tp.filter_stubs(dn2df['t'], max_lagtime_stubs*1.25)
            logging.info('filter_stubs: particle counts: %s to %s' % (dn2df['t']['particle'].nunique(),dn2df['t1']['particle'].nunique()))
            if dn2df['t1']['particle'].nunique()==0:
                logging.error('filter_stubs: particle counts =0; using less stringent conditions')
                dn2df['t1'] = tp.filter_stubs(dn2df['t'], max_lagtime_stubs*1)
        else:
            dn2df['t1'] = dn2df['t'].copy()

        if test:        
            fig=plt.figure()
            ax=plt.subplot(111)
            tp.mass_size(dn2df['t1'].groupby('particle').mean(),ax=ax);
            plt.tight_layout()
            plt.savefig('%s.mass_size.svg' % out_fh,format='svg')        
        if flt_mass_size:
            dn2df['t2'] = dn2df['t1'][((dn2df['t1']['mass'] > dn2df['t1']['mass'].quantile(mass_cutoff)) & (dn2df['t1']['size'] < dn2df['t1']['size'].quantile(size_cutoff)) &
                     (dn2df['t1']['ecc'] < ecc_cutoff))]
            logging.info('filter_mass_size: particle counts: %s to %s' % (dn2df['t1']['particle'].nunique(),dn2df['t2']['particle'].nunique()))
            if len(dn2df['t2'])==0:
                dn2df['t2'] = dn2df['t1'].copy()
                logging.warning('filter_mass_size produced 0 particles; using t2=t1.copy()')
        else:
            dn2df['t2'] = dn2df['t1'].copy()
        if test:        
            fig=plt.figure()
            ax=plt.subplot(111)
            tp.mass_size(dn2df['t2'].groupby('particle').mean(),ax=ax);
            plt.tight_layout()
            plt.savefig('%s.mass_size_post_filtering.svg' % out_fh,format='svg')        
        if flt_incomplete_trjs:
            dn2df['t2']=dn2df['t2'].reset_index()
            vals=pd.DataFrame(dn2df['t2']['particle'].value_counts())
            partis=[i for i in vals.index if vals.loc[i,'particle']>=int(vals.max())*0.95 ]
            dn2df['t2']=dn2df['t2'].loc[[i for i in dn2df['t2'].index if (dn2df['t2'].loc[i,'particle'] in partis)],:]
        dn2df['t2'].to_csv(dn2dp['t2'],sep='\t')
    else:
        dn2df['t2']=pd.read_csv(dn2dp['t2'],sep='\t')
    if test:
        for traj in ['t','t1','t2']:
            ax=plot_traj(frames[-1],traj=dn2df[traj])
        logging.info('getting plots hist')
        cols=['mass','size','ecc','signal','raw_mass','ep']
        fig=plt.figure()
        ax=plt.subplot(111)
        _=dn2df['t2'].loc[:,cols].hist(ax=ax)        
    return dn2df['t2']
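A hypothetical invocation, with frames loaded via pims and parameter dictionaries matching the keys the function reads (paths and values are assumptions):

import pims

frames = pims.ImageSequence('data/*.tif')
t2 = frames2coords(frames, 'out/sample',
                   params_locate={'diameter': 11, 'minmass': 100},
                   params_msd={'max_lagtime': 10, 'fps': 1},
                   params_link_df={'search_range': 20})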
Example #41
def tracking(video):
    # print "running tracking"
    pimsframes = pims.Video(video, as_grey = True)
    fgbg = cv2.BackgroundSubtractorMOG()
    framesmask = []
    framecount = 0
    blurredframes = []
    # HACK to flip if the name has a space in it. #TODO get all videos correctly aligned...
    if " " in video:
        pimsframes = [p[:, ::-1] for p in pimsframes]

    pimsframes = [frame[:,400:] for frame in pimsframes]
    for frame in pimsframes:
        # frame = cv2.GaussianBlur(frame,(9,9),0)
        # frame = cv2.medianBlur(frame, 7)
        # if align remove
        frame = cv2.GaussianBlur(frame,(11,11),7)
        frame = cv2.medianBlur(frame, 3)
        fgmask = fgbg.apply(frame, learningRate=1.0/history)
        framesmask.append(fgmask)
        framecount += 1
        blurredframes.append(frame)

    background_sub = [m * frame for m,frame in zip(framesmask, pimsframes)]
    if False:
        for i in range(100):
            cv2.imshow("asdf", background_sub[i])
            cv2.imshow("mask", framesmask[i])
            cv2.imshow("orig", pimsframes[i])
            cv2.imshow("blur", blurredframes[i])
            cv2.waitKey(0)
    # for i, f in enumerate(framesmask):
    #     half_show("asdf", f)
    #     cv2.waitKey(0)
    #     print i
    cells = []
    track = []

    to_track = background_sub
    minmass = 3000

    f = tp.batch(to_track[:], 11, minmass=minmass, invert=False, noise_size=3)
    # for j in range(20,100):
    #     f = tp.locate(to_track[j], 11, invert=False, minmass = minmass, noise_size=3)
    #     plt.figure(1)
    #     tp.annotate(f, to_track[j])
    #     plt.show()
    # ipdb.set_trace()
    print "linking"
    try:
        # t = tp.link_df(f, 100, memory=3)
        t = tp.link_df(f, 100, memory=1)

    except Exception:
        print "FAILED on", video
        return None
    print "done"
    # plt.figure(2)
    # tp.plot_traj(t)
    # plt.show()
    return t
Example #42
 def link_df(self, *args, **kwargs):
     kwargs.update(self.linker_opts)
     kwargs['diagnostics'] = self.do_diagnostics
     return tp.link_df(*args, **kwargs)
Example #43
 def link_df(self, *args, **kwargs):
     kwargs.update(self.linker_opts)
     return tp.link_df(*args, **kwargs)