def delete_spurious_trajectories(self, subject='particles', plot=True, export=True):
        """

        :param subject:
        :return:
        """
    ### Deletion of spurious trajectories under 'minpoints' points ###
        if subject == 'particles':
            editable_list = self.particle_trajectory_list
        elif subject == 'algae':
            editable_list = self.algae_trajectory_list
        else:
            raise Exception('The argument subject of the current method '
                            '(delete_spurious_trajectories) must be either particles or algae')

        editable_list.set_all_trajectories_dataframe(
            tp.filter_stubs(
                editable_list.all_trajectories_dataframe, self.minpoints
            )
        )
        editable_list.update_item_list()
        if plot:
            tp.plot_traj(editable_list.all_trajectories_dataframe, label=True)
            plt.show()
        if export:
            editable_list.all_trajectories_dataframe.to_csv(
                self.path + '\\t_' + subject + '.csv', index=False, header=True)
        return editable_list
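A minimal standalone sketch of the same stub-filtering step, assuming traj is a linked trajectory DataFrame (with 'particle' and 'frame' columns) and minpoints is the survival threshold; the output path is hypothetical:

import trackpy as tp
import matplotlib.pyplot as plt

# traj: a DataFrame produced by tp.link_df (assumed to exist)
traj = tp.filter_stubs(traj, minpoints)  # keep tracks spanning >= minpoints frames

tp.plot_traj(traj, label=True)  # label each surviving trajectory
plt.show()
traj.to_csv('t_particles.csv', index=False)  # hypothetical output path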
Example #2
    def link_trajectories(self, f_index=None):
        """Implements the trackpy functions link_df and filter_stubs"""
        # Reload DataStore
        if f_index is None:
            # When processing the whole video, store in a file with the same name as the movie
            data_filename = self.data_filename
        else:
            # Store temporarily
            data_filename = self.data_filename[:-5] + '_temp.hdf5'

        with dataframes.DataStore(data_filename, load=True) as data:
            if f_index is None:
                # Trackpy methods
                data.reset_index()
                data.df = trackpy.link_df(
                    data.df,
                    get_param_val(self.parameters['default']['max_frame_displacement']),
                    memory=get_param_val(self.parameters['default']['memory']))
                data.df = trackpy.filter_stubs(
                    data.df,
                    get_param_val(self.parameters['default']['min_frame_life']))
            else:
                # Add a particle id to single temporary dataframes for convenience
                num_particles = np.shape(data.df)[0]
                pids = np.arange(num_particles)
                data.df['particle'] = pids

            # Save DataStore
            data.save(filename=data_filename)
Example #3
    def track_cell(self):
        det_df = pd.read_csv(self.config.OUTPUT_PATH + self.config.ROOT_NAME + \
            '-cellDetData.csv')

        blobs_df = tp.link_df(
            det_df,
            search_range=self.config.CELL_SEARCH_RANGE,
            memory=self.config.CELL_MEMORY,
        )
        blobs_df = tp.filter_stubs(blobs_df, 5)
        blobs_df = blobs_df.reset_index(drop=True)
        blobs_df = add_traj_length(blobs_df)

        traj_num_before = blobs_df['particle'].nunique()
        after_filter_df = blobs_df[
            blobs_df['traj_length'] >= self.config.CELL_TRAJ_LEN_THRES]
        print("######################################")
        print("Trajectory number before filters: \t%d" % traj_num_before)
        print("Trajectory number after filters: \t%d" %
              after_filter_df['particle'].nunique())
        print("######################################")

        blobs_df.round(6).to_csv(self.config.OUTPUT_PATH + self.config.ROOT_NAME + \
               '-cellPhysData.csv', index=False)

        self.config.save_config()
Example #4
def form_trajectories(loc):
    global particles, P, T, bends, track
    print()
    print('Forming worm trajectories...', end=' ')
    data = {'x': [], 'y': [], 'frame': [],
            'eccentricity': [], 'area': [],
            'minor_axis_length': [],
            'area_eccentricity': []}
    for t, l in enumerate(loc):
        data['x'] += [d['centroid'][0] for d in l]
        data['y'] += [d['centroid'][1] for d in l]
        data['eccentricity'] += [d['eccentricity'] for d in l]
        data['area_eccentricity'] += [d['area_eccentricity'] for d in l]
        data['minor_axis_length'] += [d['minor_axis_length'] for d in l]
        data['area'] += [d['area'] for d in l]
        data['frame'] += [t] * len(l)
    data = pd.DataFrame(data)
    try:
        track = tp.link_df(data, search_range=max_dist_move, memory=memory)
    except tp.linking.SubnetOversizeException:
        print('Linking problem too complex. Reduce maximum move distance or memory.')
        print('Stopping.')
        exit()
    track = tp.filter_stubs(track, min([min_track_length, len(loc)]))
    try:
        # pickle requires a binary-mode file handle in Python 3
        with open('%strack.p' % save_as, 'wb') as trackfile:
            pickle.dump(track, trackfile)
    except Exception:
        print('Warning: no track file saved. Track too long.')
        print('         plot_path.py will not work on this file.')

    return track
Example #5
def form_trajectories(loc, settings):
    """Form worm trajectories."""
    print('Forming worm trajectories...', end=' ')
    data = {'x': [], 'y': [], 'frame': [],
            'eccentricity': [], 'area': [],
            'minor_axis_length': [],
            'area_eccentricity': []}
    for t, l in enumerate(loc):
        data['x'] += [d['centroid'][0] for d in l]
        data['y'] += [d['centroid'][1] for d in l]
        data['eccentricity'] += [d['eccentricity'] for d in l]
        data['area_eccentricity'] += [d['area_eccentricity'] for d in l]
        data['minor_axis_length'] += [d['minor_axis_length'] for d in l]
        data['area'] += [d['area'] for d in l]
        data['frame'] += [t] * len(l)
    data = pd.DataFrame(data)
    try:
        track = tp.link_df(data, search_range=settings["max_dist_move"],
                           memory=settings["memory"])
    except tp.linking.SubnetOversizeException:
        raise RuntimeError(
            'Linking problem too complex.'
            ' Reduce maximum move distance or memory.')
    track = tp.filter_stubs(track, min([settings["min_track_length"],
                                        len(loc)]))
    try:
        with open(os.path.join(settings["save_as"], 'track.p'),
                  'bw') as trackfile:
            pickle.dump(track, trackfile)
    except Exception:
        traceback.print_exc()
        print('Warning: no track file saved. Track too long.')
        print('         plot_path.py will not work on this file.')

    return track
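A hedged usage sketch for the Python 3 version above; the loc structure (one list of detection dicts per frame) and the settings keys are read straight from the function body, while the values here are illustrative only:

# one entry per frame; each detection dict carries the keys the function reads
loc = [
    [{'centroid': (10.0, 20.0), 'eccentricity': 0.9, 'area_eccentricity': 0.8,
      'minor_axis_length': 4.2, 'area': 120}],
    [{'centroid': (11.0, 21.0), 'eccentricity': 0.9, 'area_eccentricity': 0.8,
      'minor_axis_length': 4.1, 'area': 118}],
]
settings = {'max_dist_move': 5,      # max displacement between frames (px)
            'memory': 3,             # frames a worm may go missing
            'min_track_length': 2,   # shortest track to keep
            'save_as': '.'}          # directory that receives track.p
track = form_trajectories(loc, settings)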
Example #6
def get_data(raw_data_path, part_select_dict):
    """
    :param raw_data_path: string
    :return:
    """
    print("Processing file " + raw_data_path)
    data_filename = os.path.splitext(os.path.basename(raw_data_path))[0]
    data = pd.read_csv(raw_data_path)
    data = tp.link_df(data,
                      analyzer.MAX_PIXELS_BW_FRAMES,
                      memory=analyzer.TRACKING_MEMORY)
    print(
        str(len(data.particle.unique())) + " initial trajectories in " +
        raw_data_path)
    data = tp.filter_stubs(data, analyzer.MIN_TRACK_LENGTH)
    print(
        str(len(data.particle.unique())) + " trajectories span at least " +
        str(analyzer.MIN_TRACK_LENGTH) + " frames")
    data = analyzer.filter_particles_and_add_actual_size(
        data, data_filename, part_select_dict)
    print(str(len(data.particle.unique())) + " selected particles left")
    # drift = tp.compute_drift(data)
    # data = tp.subtract_drift(data, drift)
    # Plotting trajectories after drift cancelling
    # plt.figure().suptitle("Sample particle trajectories with drift")
    # tp.plot_traj(data)
    data = analyzer.cancel_avg_velocity_drift(data)
    # Plotting trajectories after drift cancelling
    # plt.figure().suptitle("Sample particle trajectories without drift")
    # tp.plot_traj(data)
    data = add_environment_variables(data, raw_data_path)
    return data
Example #7
def preProcessData(ball, max_displacement=20, memory_val=5, minTrajectory=9):
    '''
    This function takes a DataFrame with unsorted data and links the trajectories.
    We then filter out any trajectories that are too short to be useful and return
    the updated DataFrame.

    Inputs:
        ball - DataFrame with unsorted particle coordinates
        max_displacement - max distance in pixels a spot could have moved between frames
        memory_val - number of frames a spot can be missing for
        minTrajectory - shortest useful trajectory to keep

    Outputs:
        ball - DataFrame which now includes a particle number for each unique
        trajectory, with spurious dots removed. spurious = found in fewer than
        minTrajectory frames.
    '''

    # link_df returns a new DataFrame with a 'particle' column;
    # the result must be kept, not discarded
    ball = tp.link_df(ball, max_displacement, memory=memory_val)

    # remove trajectories that persist for fewer than minTrajectory frames
    ball = tp.filter_stubs(ball, threshold=minTrajectory)

    #ball.drop('frame',axis=1,inplace=True)

    #print(ball.head(n=20))

    return ball
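A hedged usage sketch for preProcessData; the toy DataFrame below stands in for real detection output and only needs the 'x', 'y' and 'frame' columns that trackpy's linker expects:

import pandas as pd

# two synthetic spots: one drifting slowly, one jittering in place
ball = pd.DataFrame({
    'x':     [10.0, 11.0, 12.0, 13.0, 50.0, 50.5, 50.2, 50.1],
    'y':     [10.0, 10.5, 11.0, 11.5, 80.0, 80.2, 80.1, 80.3],
    'frame': [0, 1, 2, 3, 0, 1, 2, 3],
})
linked = preProcessData(ball, max_displacement=5, memory_val=2, minTrajectory=3)
print(linked['particle'].nunique())  # expect 2 surviving trajectories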
Example #8
    def relevant_tracks(self):
        """ Returns the relevant tracks as filtered by length, eccentricity, size and mass. This step needs careful
        consideration and should definitely be used in post-processing or once the parameters have been validated.
        """
        locations = self.locations.copy()
        t1 = tp.filter_stubs(locations,
                             self.config['process']['min_traj_length'])
        t2 = t1[((t1['mass'] > self.config['process']['min_mass']) &
                 (t1['size'] < self.config['process']['max_size']) &
                 (t1['ecc'] < self.config['process']['max_ecc']))]
        return t2
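Because the docstring warns that these cuts need careful validation, a hedged helper like the following (not part of the original class) can report how many trajectories each cut keeps on its own before the thresholds are committed; the column names match those used above:

def report_filter_effect(t1, min_mass, max_size, max_ecc):
    """Print how many trajectories each appearance cut keeps on its own (sketch)."""
    total = t1['particle'].nunique()
    for name, mask in [('mass', t1['mass'] > min_mass),
                       ('size', t1['size'] < max_size),
                       ('ecc',  t1['ecc'] < max_ecc)]:
        kept = t1[mask]['particle'].nunique()
        print('%s cut alone keeps %d/%d trajectories' % (name, kept, total))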
Example #9
def link(tracks: pd.DataFrame,
         filter_stubs: int = 0,
         **kwargs) -> pd.DataFrame:
    df = trackpy.link_df(tracks, **kwargs)

    if filter_stubs != 0:
        f = trackpy.filter_stubs(df, filter_stubs)
    else:
        f = df

    return f
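A hedged usage note for this thin wrapper: any keyword accepted by trackpy's linker passes straight through, so (assuming detections is a DataFrame with 'x', 'y' and 'frame' columns):

# link with a 15 px search radius and 3-frame memory,
# then drop tracks shorter than 10 frames
linked = link(detections, filter_stubs=10, search_range=15, memory=3)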
Example #10
    def _link_trajectories(self):
        """Implements the trackpy functions link_df and filter_stubs"""
        # Trackpy methods
        self.data.particle_data = trackpy.link_df(
            self.data.particle_data,
            self.parameters['max frame displacement'],
            memory=self.parameters['memory'])
        self.data.particle_data = trackpy.filter_stubs(
            self.data.particle_data, self.parameters['min frame life'])

        # Save DataStore
        self.data.save()
Example #11
    def __filter_trajectories(self):
        """ Filter out short trajectories. If a trajectory is shorter than
        the threshold it will be removed from the set """

        filtered = tp.filter_stubs(self.trajectories, threshold=25)
        if len(filtered) > 50:
            """ Enough data survived the filtering, so use it. When only a
            few trajectories exist, removing the short ones would skew the
            results, so in that case we fall back to the unchanged dataset
            below. With many tracks, removing the shorter ones makes the
            statistics cleaner """
            return filtered

        return self.trajectories
Example #12
    def _link_trajectories(self):
        """Implements the trackpy functions link_df and filter_stubs"""
        # Reload DataStore
        with dataframes.DataStore(self.data_filename) as data:
            # Trackpy methods
            data.reset_index()
            data.df = trackpy.link_df(
                data.df,
                self.parameters['max frame displacement'],
                memory=self.parameters['memory'])

            data.df = trackpy.filter_stubs(data.df,
                                           self.parameters['min frame life'])
            data.set_frame_index()
Example #13
def fixed_filter_stubs(data_frame, i):
    """
    Fixes Trackpy's filter_stubs function by resetting the indices of the data_frame.

    Parameters:
        data_frame (DataFrame): The DataFrame of trajectory information generated by Trackpy.
        i (int): The minimum number of frames the recoverable trajectories must persist for.

    Returns:
        t (DataFrame): The trajectory information of particles that exist in at least i frames.
    """
    t = tp.filter_stubs(data_frame.copy(), i)
    t.index = range(0, len(t))

    return t
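A short hedged example of the difference this wrapper makes, assuming traj is a linked DataFrame: trackpy's filter_stubs typically returns a frame-indexed result, while the wrapper renumbers rows from zero:

t_plain = tp.filter_stubs(traj, 25)
t_fixed = fixed_filter_stubs(traj, 25)
print(t_plain.index[:5])  # indexed by frame number, with duplicates across particles
print(t_fixed.index[:5])  # plain 0, 1, 2, 3, 4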
Example #14
    def particle_tracking(self, search_range, length_cutoff, **kwargs):
        '''Tracking method. One must run particle detection before this.

        search_range : the max distance a particle may move between frames and still be joined into one track
        length_cutoff: the min length (in frames) that a track must have
        **kwargs     : other parameters passed to the trackpy.link_df method

        returns: pandas.DataFrame containing the tracks
        '''
        # must use a dataframe containing true dots only
        particles = pd.concat([item[item.True_particle] for item in self.particle_dfs])
        tracks    = tp.link_df(particles, search_range=search_range, **kwargs)
        if length_cutoff > 0:
            tracks = tp.filter_stubs(tracks, length_cutoff)
        self.tracks = tracks
        return tracks
Example #15
def compute_traj(filename):

    vid = pims.Video('../test_video/' + filename)
    frames = as_grey(vid)

    midpoint = len(frames) / 2
    start = int(midpoint - 60)
    stop = int(midpoint + 60)

    f = tp.batch(frames[start:stop],
                 11,
                 invert=False,
                 minmass=160,
                 maxsize=3.0,
                 engine="numba")

    t = tp.link_df(f, 5, memory=3)

    t1 = tp.filter_stubs(t, 60)
    # Compare the number of particles in the unfiltered and filtered data.
    print('Before:', t['particle'].nunique())
    print('After:', t1['particle'].nunique())

    data = []
    for item in set(t1.particle):
        sub = t1[t1.particle == item]
        dvx = np.diff(sub.x)
        dvy = np.diff(sub.y)
        for x, y, dx, dy, frame, mass, size, ecc, signal, raw_mass, ep in \
                zip(sub.x[:-1], sub.y[:-1], dvx, dvy, sub.frame[:-1],
                    sub.mass[:-1], sub['size'][:-1], sub.ecc[:-1],
                    sub.signal[:-1], sub.raw_mass[:-1], sub.ep[:-1]):
            data.append({
                'dx': dx,
                'dy': dy,
                'x': x,
                'y': y,
                'frame': frame,
                'particle': item,
                'size': size,
                'ecc': ecc,
                'signal': signal,
                'mass': mass,
                'raw_mass': raw_mass,
                'ep': ep
            })
    df = pd.DataFrame(data)
    df.to_csv('../csvs/extract.csv')
Example #16
    def calculate_histogram(self):
        self.calculating_histograms = True
        locations = self.locations.copy()
        t1 = tp.filter_stubs(locations, self.config['process']['min_traj_length'])
        # t2 = t1[((t1['mass'] > self.config['process']['min_mass']) & (t1['size'] < self.config['process']['max_size']) &
        #          (t1['ecc'] < self.config['process']['max_ecc']))]
        im = tp.imsd(t1, self.config['process']['um_pixel'], self.config['process']['fps'])
        self.histogram_values = []
        for pcle in im:
            if general_stop_event.is_set():
                break

            data = im[pcle]
            t = data.index[~np.isnan(data.values)]
            val = data.values[~np.isnan(data.values)]
            try:
                slope, intercept, r, p, stderr = stats.linregress(np.log(t), np.log(val))
                self.histogram_values.append([slope, intercept])
            except Exception:
                pass
        self.calculating_histograms = False
        self.publisher.publish('histogram', self.histogram_values)
Example #17
    def calculate_histogram(self):
        """ Starts a new thread to calculate the histogram of fit-parameters based on the mean-squared displacement of
        individual particles. It publishes the data on topic `histogram`.

        .. warning:: This method is incredibly expensive. Since it runs on a thread it can block other pieces of code,
        especially the GUI, which runs on the same process.

        .. TODO:: The histogram loops over all the particles. It would be better to skip particles for which there is
            no new data

        .. TODO:: Make this method able to run on a separate process. So far this is not possible because it relies on
            data stored on the class itself (`self.locations`).
        """
        self.calculating_histograms = True
        locations = self.locations.copy()
        t1 = tp.filter_stubs(locations,
                             self.config['process']['min_traj_length'])
        t2 = t1[((t1['mass'] > self.config['process']['min_mass']) &
                 (t1['size'] < self.config['process']['max_size']) &
                 (t1['ecc'] < self.config['process']['max_ecc']))]
        im = tp.imsd(t2, self.config['process']['um_pixel'],
                     self.config['process']['fps'])
        self.histogram_values = []
        for pcle in im:
            if general_stop_event.is_set():
                break

            data = im[pcle]
            t = data.index[~np.isnan(data.values)]
            val = data.values[~np.isnan(data.values)]
            try:
                slope, intercept, r, p, stderr = stats.linregress(
                    np.log(t), np.log(val))
                self.histogram_values.append([slope, intercept])
            except Exception:
                pass
        self.calculating_histograms = False
        self.publisher.publish('histogram', self.histogram_values)
Example #18
    def track_mrna(self):
        det_df = pd.read_csv(self.config.OUTPUT_PATH + self.config.ROOT_NAME + \
            '-detData.csv')

        blobs_df = tp.link_df(
            det_df,
            search_range=self.config.mRNA_SEARCH_RANGE,
            memory=self.config.mRNA_MEMORY,
        )
        blobs_df = tp.filter_stubs(blobs_df, 5)
        blobs_df = blobs_df.reset_index(drop=True)
        blobs_df = add_traj_length(blobs_df)

        blobs_df_cut = blobs_df[['frame', 'x', 'y', 'particle']]
        blobs_df_cut = blobs_df_cut.apply(pd.to_numeric)
        im = tp.imsd(
            blobs_df_cut,
            mpp=self.config.PIXEL_SIZE,
            fps=self.config.FRAME_RATE,
            max_lagtime=np.inf,
        )

        blobs_df = get_d_values(blobs_df, im, self.config.mRNA_DIVIDE_NUM)
        blobs_df = blobs_df.apply(pd.to_numeric)

        traj_num_before = blobs_df['particle'].nunique()
        after_filter_df = blobs_df[
            blobs_df['traj_length'] >= self.config.mRNA_TRAJ_LEN_THRES]
        print("######################################")
        print("Trajectory number before filters: \t%d" % traj_num_before)
        print("Trajectory number after filters: \t%d" %
              after_filter_df['particle'].nunique())
        print("######################################")

        blobs_df.round(6).to_csv(self.config.OUTPUT_PATH + self.config.ROOT_NAME + \
               '-physData.csv', index=False)

        self.config.save_config()
Example #19
def get_data(outdir):
    """ Loads the output of the preprocessing steps for feature extraction
        Returns the formatted data
    """
    frames = pims.ImageSequence("../"+outdir+"/*tif")
    print(frames)

    # particle diameter
    diam = 11
    features = tp.batch(frames[:frames._count], diameter=diam, minmass=1, invert=True)
    # Link features in time: sigma_(max)
    search_range = diam-2
    # r, g, b images are loaded
    lframes = int(np.floor(frames._count/3))
    # default max 15% frame count
    imax = int(np.floor(15*lframes/100))
    t = tp.link_df(features, search_range, memory=imax)
    # default neighbour strategy: KDTree

    # Filter spurious trajectories
    # default min 10% frame count
    imin = int(np.floor(10*lframes/100))
    # if seen in imin
    t1 = tp.filter_stubs(t, imin)

    # Compare the number of particles in the unfiltered and filtered data.
    print("Unique number of particles (Before filtering):", t["particle"].nunique())
    print("(After):", t1["particle"].nunique())

    # export pandas data frame with filename being current date and time
    timestr = time.strftime("%Y%m%d-%H%M%S")
    data = pd.DataFrame({"x": t1.x, "y": t1.y, "z": t1.frame, "mass": t1.mass, "size": t1.size, "ecc": t1.ecc, "signal": t1.signal, "ep": t1.ep, "particle": t1.particle})

    file_name = "../features_" + timestr + ".csv"
    print("Exporting %s" % (file_name))
    data.to_csv(file_name, sep="\t", encoding="utf-8")
    return data
Example #20
def nd2msd(nd_fh):
    # print(nd_fh)
    frames = pims.ND2_Reader(nd_fh)
    logging.info('number of dimensions = %d' % len(np.shape(frames)))
    if len(np.shape(frames)) == 4:
        frames = average_z(frames)
    threshold = np.percentile(frames, 75)
    f_batch = tp.batch(frames, diameter=11, threshold=threshold)

    t = tp.link_df(f_batch, search_range=11, memory=3)
    t_flt = tp.filter_stubs(t, 3 * int(len(frames) / 4))
    try:
        d = tp.compute_drift(t_flt)
        t_cor = tp.subtract_drift(t_flt, d)
    except Exception:
        t_cor = t_flt
        logging.info("drift correction raised an exception; skipped")
    # plt.figure()
    # tp.plot_traj(t_flt)
    # plt.figure()
    # d.plot()
    imsd = tp.imsd(t_cor, 0.1, 0.2, max_lagtime=100, statistic='msd')
    emsd = tp.emsd(t_cor, 0.1, 0.2, max_lagtime=100)
    return imsd, emsd
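A hedged usage sketch for nd2msd, assuming the pims ND2 reader is installed and 'sample.nd2' is a placeholder path:

import matplotlib.pyplot as plt
import numpy as np

imsd, emsd = nd2msd('sample.nd2')  # hypothetical file
# emsd is a Series of ensemble MSD values indexed by lag time;
# a log-log plot exposes the power-law exponent
plt.plot(np.log(emsd.index), np.log(emsd.values))
plt.xlabel('log lag time')
plt.ylabel('log MSD')
plt.show()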
Example #21
    def filter_traj(self, threshold: int):
        self.trajectories = tp.filter_stubs(self.trajectories, threshold)
        self.effective_link_config['threshold'] = threshold

        self._draw()
Example #22
def split_traj_at_long_trajectorie(t4_cutted,
                                   settings,
                                   Min_traj_length=None,
                                   Max_traj_length=None):
    """ split trajectories if they are too long
    
    This might be usefull, to have sufficently long trajectories all at the same length.
    Otherwise they have very different confidence intervalls
    E.g: 2 particles: 1: 500 frames, 2: 2000 frames
    particle 2 is splitted into 4 500frames
    
    Splitting of a particle is fine, because a particle can go out of focus and return later
    and is assigned as new particle too.
    
    Important is to look at the temporal component, thus particle 2 never exists twice
    """
    keep_tail = settings["Split"]["Max_traj_length_keep_tail"]

    if Max_traj_length is None:
        Max_traj_length = int(settings["Split"]["Max_traj_length"])

    if Min_traj_length is None:
        Min_traj_length = int(settings["Link"]["Min_tracking_frames"])

    free_particle_id = np.max(t4_cutted["particle"]) + 1

    #    Max_traj_length = 1000

    t4_cutted["true_particle"] = t4_cutted["particle"]

    traj_length = t4_cutted.groupby([
        "particle"
    ]).frame.max() - t4_cutted.groupby(["particle"]).frame.min()

    # mark trajectories that exceed the maximum length; they are split below
    split_particles = traj_length > Max_traj_length

    particle_list = split_particles.index[split_particles]

    particle_list = np.asarray(particle_list.values, dtype='int')

    num_particle_list = len(particle_list)

    for count, test_particle in enumerate(particle_list):
        nd.visualize.update_progress("Split too long trajectories",
                                     (count + 1) / num_particle_list)

        #        start_frame = t4_cutted[t4_cutted["particle"] == test_particle]["frame"].iloc[0]
        #        end_frame   = t4_cutted[t4_cutted["particle"] == test_particle]["frame"].iloc[-1]

        start_frame = t4_cutted[t4_cutted["particle"] ==
                                test_particle]["frame"].iloc[0]
        #        end_frame   = t4_cutted[t4_cutted["particle"] == test_particle]["frame"].iloc[-1]

        traj_length = len(t4_cutted[t4_cutted["particle"] == test_particle])

        print("traj_length", traj_length)
        while traj_length > Max_traj_length:
            if (traj_length > 2 * Max_traj_length) or (keep_tail == 0):
                start_frame = t4_cutted[
                    t4_cutted["particle"] ==
                    test_particle].iloc[Max_traj_length]["frame"]
                t4_cutted.loc[(t4_cutted["particle"] == test_particle) &
                              (t4_cutted["frame"] >= start_frame),
                              "particle"] = free_particle_id

                test_particle = free_particle_id
                free_particle_id = free_particle_id + 1

                traj_length = len(
                    t4_cutted[t4_cutted["particle"] == test_particle])
            else:
                break

    if keep_tail == 0:
        t4_cutted = tp.filter_stubs(t4_cutted, Min_traj_length)

    return t4_cutted
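A minimal usage sketch, assuming the nd helper package used above is importable and t4 is a linked trajectory DataFrame with 'particle' and 'frame' columns; the settings keys are the ones the function reads, and the values are illustrative:

settings = {
    "Split": {"Max_traj_length": 500, "Max_traj_length_keep_tail": 0},
    "Link": {"Min_tracking_frames": 50},
}
t5 = split_traj_at_long_trajectorie(t4, settings)
# a 2000-frame particle now appears as four 500-frame particles;
# its original id is preserved in the 'true_particle' column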
Example #23
            continue
        if region.major_axis_length < 50 or region.major_axis_length > 90:
            continue
        if region.minor_axis_length < 4 or region.minor_axis_length > 7:
            continue
        feature = feature + 1
        
#        minr, minc, maxr, maxc = region.bbox
#        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
#                              fill=False, edgecolor='red', linewidth=1)
        elli = mpatches.Ellipse([region.centroid[1],region.centroid[0]],region.major_axis_length,
                                region.minor_axis_length,-region.orientation/6.28*360,fill=False,
                                edgecolor='red',linewidth=2)
#        ax.add_patch(rect)
#        plt.draw()
        ax.add_patch(elli)
        plt.draw()
        # DataFrame.append was removed in pandas 2.0; use pd.concat instead
        features = pd.concat([features,
                              pd.DataFrame([{'y': region.centroid[0],
                                             'x': region.centroid[1],
                                             'theta': -region.orientation,
                                             'frame': num}])],
                             ignore_index=True)
    print('%d features found in frame %d' % (feature,num))
features.to_csv(os.path.join(datapath,'initial_tracking.dat'))
ti2 = time.time()

t = tp.link_df(features, 40, memory=5)  # can disappear for at most 5 frames
t1 = tp.filter_stubs(t, 30)  # must appear in at least 30 frames
t1.to_csv(os.path.join(datapath,'trajectory.dat'))
print('###########################################')
print('Tracking finished: %f seconds for %d frames' % ((ti2 - ti1),nframe))
Example #24
time_cutoff = 419
f = tp.batch(frames[:time_cutoff], 11, minmass=49000, max_iterations=20)
print("f length is:", time_cutoff - 2)

# link features into particle trajectories
# define max displacement
max_disp = 5
# define frame memory for feature drop-out
frame_memory = 5

t = tp.link_df(f, max_disp, memory=frame_memory)
print(t.head())

# spurious ephemeral trajectory filtering
# filter features that last for a given number of frames
t1 = tp.filter_stubs(t, time_cutoff)
# Compare the number of particles in the unfiltered and filtered data.
print('Before:', t['particle'].nunique())
print('After:', t1['particle'].nunique())

# filter by appearance (size vs mass)
plt.figure()
tp.mass_size(t1.groupby('particle').mean())
plt.show()

t2 = t1[((t1['mass'] > 0.05) & (t1['size'] < 3.0) &  # fix mass
         (t1['ecc'] < 0.3))]

plt.figure()
tp.annotate(t2[t2['frame'] == 0], frames[0])
plt.show()
Example #25
path = r'C:\Users\elpresidente_2\Desktop\MatlabPA14\Particle Tracking\cropped_movie'
frames = pims.open(path + r'\*.tif')
print(type(frames))
proc_frames = []
# First detect particles in each frame
for frame in frames:
    frame_ = cv2.GaussianBlur(frame, (5, 5), 0)
    frame = cv2.adaptiveThreshold(frame_, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 9, 2)
    proc_frames.append(frame)

loc = tp.batch(proc_frames, 13, invert=True,
               minmass=2000)  # Found minmass from histogram

t = tp.link(loc, 9, memory=4)
t1 = tp.filter_stubs(t, 20)
plt.figure()
tp.plot_traj(t1)
plt.show()

#tp.annotate(f, frames[0])

#fig, ax = plt.subplots()
#ax.hist(f['mass'], bins=20)
#ax.set_xlabel('mass')
#ax.set_ylabel('count')
#plt.show()

#f = tp.locate(th3, 13, invert=True, minmass = 2000)
#tp.annotate(f, frame)
Example #26
# In[ ]:


plt.figure(figsize=[12,12])
plt.imshow(v0R+bk0R)
#plt.scatter(tR['x'],tR['y'],s=0.3,c='g')
plt.scatter(tR['x'],tR['y'],s=0.3,c=tR['mass'])
plt.show()


# In[ ]:


# Remove tracks with too few points (fewer than 200)
tR1 = tp.filter_stubs(tR,200)
plt.figure(figsize=[12,12])
tp.plot_traj(tR1);


# In[ ]:


plt.figure(figsize=[12,12])
plt.imshow(v0R)
plt.scatter(tR1['x'],tR1['y'],s=0.3,c=tR1['mass'])
#plt.scatter(tR1['x'],tR1['y'],s=0.3,c='r')
plt.show()


# In[ ]:
Example #27
def frames2coords(frames,out_fh,
                  params_locate,params_msd,params_link_df={'search_range':20,},
                  mass_cutoff=0.5,size_cutoff=0.5,ecc_cutoff=0.5,
                    filter_stubs=True,flt_mass_size=True,flt_incomplete_trjs=True,
                    force=False,test=False):
    dns=['f_batch','t','t1','t2']
    dn2dp={dn:f'{out_fh}.{dn}.tsv' for dn in dns}
    dn2df={}
    if not exists(dn2dp['t2']) or force:
        if not exists(dn2dp['t']) or force:
            dn2df['f_batch']=tp.batch(frames,engine='numba',**params_locate)
            dn2df['t']=tp.link_df(dn2df['f_batch'], **params_link_df)
            print(params_link_df)
            dn2df['f_batch'].to_csv(dn2dp['f_batch'],sep='\t')
            dn2df['t'].to_csv(dn2dp['t'],sep='\t')
        else:
            dn2df['t']=pd.read_csv(dn2dp['t'])
        max_lagtime_stubs=params_msd["max_lagtime"]*params_msd["fps"]
        if filter_stubs:
            dn2df['t1'] = tp.filter_stubs(dn2df['t'], max_lagtime_stubs*1.25)
            logging.info('filter_stubs: particle counts: %s to %s' % (dn2df['t']['particle'].nunique(),dn2df['t1']['particle'].nunique()))
            if dn2df['t1']['particle'].nunique()==0:
                logging.error('filter_stubs: particle counts =0; using less stringent conditions')
                dn2df['t1'] = tp.filter_stubs(dn2df['t'], max_lagtime_stubs*1)
        else:
            dn2df['t1'] = dn2df['t'].copy()

        if test:        
            fig=plt.figure()
            ax=plt.subplot(111)
            tp.mass_size(dn2df['t1'].groupby('particle').mean(),ax=ax);
            plt.tight_layout()
            plt.savefig('%s.mass_size.svg' % out_fh,format='svg')        
        if flt_mass_size:
            dn2df['t2'] = dn2df['t1'][((dn2df['t1']['mass'] > dn2df['t1']['mass'].quantile(mass_cutoff)) & (dn2df['t1']['size'] < dn2df['t1']['size'].quantile(size_cutoff)) &
                     (dn2df['t1']['ecc'] < ecc_cutoff))]
            logging.info('filter_mass_size: particle counts: %s to %s' % (dn2df['t1']['particle'].nunique(),dn2df['t2']['particle'].nunique()))
            if len(dn2df['t2'])==0:
                dn2df['t2'] = dn2df['t1'].copy()
                logging.warning('filter_mass_size produced 0 particles; using t2=t1.copy()')
        else:
            dn2df['t2'] = dn2df['t1'].copy()
        if test:        
            fig=plt.figure()
            ax=plt.subplot(111)
            tp.mass_size(dn2df['t2'].groupby('particle').mean(),ax=ax);
            plt.tight_layout()
            plt.savefig('%s.mass_size_post_filtering.svg' % out_fh,format='svg')        
        if flt_incomplete_trjs:
            dn2df['t2']=dn2df['t2'].reset_index()
            vals=pd.DataFrame(dn2df['t2']['particle'].value_counts())
            partis=[i for i in vals.index if vals.loc[i,'particle']>=int(vals.max())*0.95 ]
            dn2df['t2']=dn2df['t2'].loc[[i for i in dn2df['t2'].index if (dn2df['t2'].loc[i,'particle'] in partis)],:]
        dn2df['t2'].to_csv(dn2dp['t2'],sep='\t')
    else:
        dn2df['t2']=pd.read_csv(dn2dp['t2'],sep='\t')
    if test:
        for traj in ['t','t1','t2']:
            ax=plot_traj(frames[-1],traj=dn2df[traj])
        logging.info('getting plots hist')
        cols=['mass','size','ecc','signal','raw_mass','ep']
        fig=plt.figure()
        ax=plt.subplot(111)
        _=dn2df['t2'].loc[:,cols].hist(ax=ax)        
    return dn2df['t2']
Example #28
def get_data(outdir, red, green, blue, diam=11):
    ''' Loads the output of the preprocessing steps for particle extraction
        Returns the formatted data
    '''
    frames = pims.ImageSequence("../" + outdir + "/*tif")
    print(frames)

    # particle diameter
    features = tp.batch(frames[:frames._count], diameter=diam, \
                        minmass=1, invert=True)

    # Link features in time
    search_range = diam - 2  # sigma_(max)

    lframes = int(np.floor(frames._count / 3))  # r, g, b images are loaded
    imax = int(np.floor(15 * lframes / 100))  # default max 15% frame count
    t = tp.link_df(features, search_range, memory=imax)
    # default neighbour strategy: KDTree

    # Filter spurious trajectories
    imin = int(np.floor(10 * lframes / 100))  # default min 10% frame count
    t1 = tp.filter_stubs(t, imin)  # if seen in imin

    # Compare the number of particles in the unfiltered and filtered data
    print('Unique number of particles (Before filtering):',
          t['particle'].nunique())
    print('(After):', t1['particle'].nunique())

    # export pandas data frame with filename being current date and time
    timestr = time.strftime("%Y%m%d-%H%M%S")

    data = pd.DataFrame({'x': t1.x, 'y': t1.y, 'z': t1.frame,
                         'mass': t1.mass, 'size': t1['size'], 'ecc': t1.ecc,
                         'signal': t1.signal, 'ep': t1.ep,
                         'particle': t1.particle})

    # format the dataframe / original indexing
    data["n"] = np.arange(len(data))

    print("Sorting dataframe by time...")
    data = data.sort_values(by='z', ascending=True)

    print("Extracting pixel values of particles...")
    r, g, b = get_val(red, 2, data), get_val(green, 1,
                                             data), get_val(blue, 0, data)

    print("Normalising rgb values to relative quantities...")
    r1, g1, b1 = np.array(r), np.array(g), np.array(b)
    r = (r1 - np.min(r1)) * (65535 / np.max(r1))
    g = (g1 - np.min(g1)) * (65535 / np.max(g1))
    b = (b1 - np.min(b1)) * (65535 / np.max(b1))

    print("Adding (r,g,b) values as columns to dataframe...")
    strname, px_val = ["r", "g", "b"], [r, g, b]
    add_arrays_df(strname, px_val, data)

    # sort back to original state
    data = data.sort_values(by='n', ascending=True)

    # remove the previously created column
    data.drop('n', axis=1, inplace=True)

    # format df with rgb values to uint8
    data = format_df(data)

    print "Dataframe summary:\n", data.describe()
    file_name = "../particles_" + timestr + ".csv"
    print "Exporting %s" % (file_name)
    data.to_csv(file_name, sep='\t', encoding='utf-8')

    return data
Example #29
    def get_53bp1_blob_mask(self):
        # If no mask ref file, use raw file automatically
        frames = nonempty_openfile1_or_openfile2(
            self.config.OUTPUT_PATH, self.config.MASK_53BP1_BLOB_NAME,
            self.config.ROOT_NAME + '-raw.tif')

        # If only 1 frame available, duplicate it to enough frames_num.
        tot_frame_num = len(
            imread(self.config.OUTPUT_PATH + self.config.ROOT_NAME +
                   '-raw.tif'))
        if frames.ndim == 2:
            dup_frames = np.zeros(
                (tot_frame_num, frames.shape[0], frames.shape[1]),
                dtype=frames.dtype)
            for i in range(tot_frame_num):
                dup_frames[i] = frames
            frames = dup_frames

        # Get mask file and save it using 255 and 0
        imsave(
            self.config.OUTPUT_PATH + self.config.ROOT_NAME + '-tempFile.tif',
            frames)
        pims_frames = pims.open(self.config.OUTPUT_PATH +
                                self.config.ROOT_NAME + '-tempFile.tif')

        blobs_df, det_plt_array = detect_blobs(
            pims_frames[0],
            min_sig=self.config.MASK_53BP1_BLOB_MINSIG,
            max_sig=self.config.MASK_53BP1_BLOB_MAXSIG,
            num_sig=self.config.MASK_53BP1_BLOB_NUMSIG,
            blob_thres=self.config.MASK_53BP1_BLOB_THRES,
            peak_thres_rel=self.config.MASK_53BP1_BLOB_PKTHRES_REL,
            r_to_sigraw=1.4,
            pixel_size=self.config.PIXEL_SIZE,
            diagnostic=True,
            pltshow=True,
            plot_r=False,
            truth_df=None)

        blobs_df, det_plt_array = detect_blobs_batch(
            pims_frames,
            min_sig=self.config.MASK_53BP1_BLOB_MINSIG,
            max_sig=self.config.MASK_53BP1_BLOB_MAXSIG,
            num_sig=self.config.MASK_53BP1_BLOB_NUMSIG,
            blob_thres=self.config.MASK_53BP1_BLOB_THRES,
            peak_thres_rel=self.config.MASK_53BP1_BLOB_PKTHRES_REL,
            r_to_sigraw=1.4,
            pixel_size=self.config.PIXEL_SIZE,
            diagnostic=False,
            pltshow=False,
            plot_r=False,
            truth_df=None)

        blobs_df = tp.link_df(
            blobs_df,
            search_range=self.config.MASK_53BP1_BLOB_SEARCH_RANGE,
            memory=self.config.MASK_53BP1_BLOB_MEMORY)
        blobs_df = tp.filter_stubs(
            blobs_df, self.config.MASK_53BP1_BLOB_TRAJ_LENGTH_THRES)
        blobs_df = blobs_df.reset_index(drop=True)

        masks_53bp1_blob = blobs_df_to_mask(frames, blobs_df)

        os.remove(self.config.OUTPUT_PATH + self.config.ROOT_NAME +
                  '-tempFile.tif')

        return masks_53bp1_blob
Example #30
    def link_mitos(self, tracking_seach_radius: int,
                   last_timepoint: int) -> None:
        """Links previously found mitochondria into trajectories

        Runs the particle linking algorithm using existing particle locations.
        Much faster than finding particles again in the whole trial and then
        linking with different parameters. Useful if particle locations look
        good, but the trajectories look wrong. Rather than returning a dict
        with the particles, it just saves them directly to the
        trackpyBatchResults.yaml file in analyzed_data_location. Also
        saves unlinked particle finding results and images of results.

        Args:
            tracking_seach_radius: int for maximum search radius for linking
            last_timepoint: int for last timepoint to analyze


        Returns:
            None
        """

        print('linking particles...')

        save_location = self.analyzed_data_location

        # link the particles we found between time points
        linked = tp.link_df(self.mitos_from_batch.loc[(
            self.mitos_from_batch['frame'] < last_timepoint)],
                            search_range=tracking_seach_radius,
                            pos_columns=['z', 'y', 'x'])

        # only keep trajectories where point appears in all frames
        self.linked_mitos = tp.filter_stubs(linked, last_timepoint)

        # load the file of parameters that tp.batch saved previously
        with open(save_location.joinpath('trackpyBatchParams.yaml'),
                  'r') as yamlfile:
            old_yaml = yaml.safe_load(yamlfile)

        # add parameters used for linking to the yaml created by tp.batch
        other_param_dict = dict({
            'tracking_seach_radius': tracking_seach_radius,
            'last_timepoint': last_timepoint
        })
        new_param_dict = {**old_yaml, **other_param_dict}
        with open(save_location.joinpath('trackpyBatchParams.yaml'),
                  'w') as yamlfile:
            yaml.dump(new_param_dict, yamlfile, default_flow_style=False)

        # dump the latest analysis into the history file
        with open(self.batch_history_file, 'a') as yamlfile:
            yaml.dump(new_param_dict, yamlfile, explicit_start=True)

        # save the results to a yaml file
        linked_mitos_dict = self.linked_mitos.reset_index(drop=True).to_dict(
            orient='index')
        with open(save_location.joinpath('trackpyBatchResults.yaml'),
                  'w') as yamlfile:
            yaml.dump(linked_mitos_dict,
                      yamlfile,
                      explicit_start=True,
                      default_flow_style=False)
Example #31
    def run_batch(self, images_ndarray: np.ndarray,
                  roi: Tuple[int, int, int, int],  # (xmin, ymin, xmax, ymax)
                  gaussian_width: int, particle_z_diameter: int,
                  particle_xy_diameter: int, brightness_percentile: int,
                  min_particle_mass: int, bottom_slice: int, top_slice: int,
                  tracking_seach_radius: int, last_timepoint: int,
                  notes: str) -> None:
        """Runs particle finding algorithm on entire trial.

        Runs the particle finding algorithm on the whole trial. Rather than
        returning a dict with the particles, it just saves them directly to
        the trackpyBatchResults.yaml file in analyzed_data_location. Also
        saves unlinked particle finding results and images of results.

        Args:
            images_ndarray: Numpy array with all the image data
            roi: Tuple with xmin, ymin, xmax, ymax defining region of interest
            gaussian_width: int specifying the width of Gaussian blur kernel
            particle_z_diameter: int for maximum particle size in z
            particle_xy_diameter: int for maximum particle size in x and y
            brightness_percentile: int for brightness threshold as percentile
            min_particle_mass: int for minimum integrated mass cutoff
            bottom_slice: int for which slice to use as bottom of the stack
            top_slice: int for which slice to use as top of the stack
            tracking_seach_radius: int for maximum search radius for linking
            last_timepoint: int for last timepoint to analyze
            notes: str for short note about goal of this run


        Returns:
            None
        """

        slices_to_analyze = images_ndarray[:last_timepoint,
                                           bottom_slice:top_slice,
                                           roi[1]:roi[3], roi[0]:roi[2]]
        particle_diameter = (particle_z_diameter, particle_xy_diameter,
                             particle_xy_diameter)
        save_location = self.analyzed_data_location
        metadata_save_location = str(
            save_location.joinpath('trackpyBatchParams.yaml'))

        # run batch of images with the current set of parameters
        self.mitos_from_batch = tp.batch(frames=slices_to_analyze,
                                         diameter=particle_diameter,
                                         percentile=brightness_percentile,
                                         minmass=min_particle_mass,
                                         noise_size=gaussian_width,
                                         meta=metadata_save_location,
                                         characterize=True)

        # link the particles we found between time points
        linked = tp.link_df(self.mitos_from_batch,
                            tracking_seach_radius,
                            pos_columns=['z', 'y', 'x'])

        # only keep trajectories where point appears in all frames
        self.linked_mitos = tp.filter_stubs(linked, last_timepoint)

        # Correct for roi offset
        self.mitos_from_batch['x'] = (self.mitos_from_batch['x'] +
                                      min([roi[0], roi[2]]))
        self.mitos_from_batch['y'] = (self.mitos_from_batch['y'] +
                                      min([roi[1], roi[3]]))
        self.linked_mitos['x'] = (self.linked_mitos['x'] +
                                  min([roi[0], roi[2]]))
        self.linked_mitos['y'] = (self.linked_mitos['y'] +
                                  min([roi[1], roi[3]]))

        # add other parameters to the yaml created by tp.batch
        if notes == 'Notes for analysis run':
            notes = 'none'
        other_param_dict = dict({
            'tracking_seach_radius': tracking_seach_radius,
            'bottom_slice': bottom_slice,
            'top_slice': top_slice,
            'last_timepoint': last_timepoint,
            'roi': roi,
            'notes': notes
        })
        with open(save_location.joinpath('trackpyBatchParams.yaml'),
                  'a') as yamlfile:
            yaml.dump(other_param_dict, yamlfile, default_flow_style=False)

        # load the file again now that it has all parameters
        with open(save_location.joinpath('trackpyBatchParams.yaml'),
                  'r') as yamlfile:
            cur_yaml = yaml.safe_load(yamlfile)

        # dump the latest analysis into the history file
        with open(self.batch_history_file, 'a') as yamlfile:
            yaml.dump(cur_yaml, yamlfile, explicit_start=True)

        # save the results to a yaml file
        linked_mitos_dict = self.linked_mitos.reset_index(drop=True).to_dict(
            orient='index')
        with open(save_location.joinpath('trackpyBatchResults.yaml'),
                  'w') as yamlfile:
            yaml.dump(linked_mitos_dict,
                      yamlfile,
                      explicit_start=True,
                      default_flow_style=False)
        mitos_from_batch_dict = self.mitos_from_batch.reset_index(
            drop=True).to_dict(orient='index')
        with open(save_location.joinpath('unlinkedTrackpyBatchResults.yaml'),
                  'w') as yamlfile:
            yaml.dump(mitos_from_batch_dict,
                      yamlfile,
                      explicit_start=True,
                      default_flow_style=False)

        self.save_diag_figs(images_ndarray, self.linked_mitos,
                            self.mitos_from_batch, save_location)
Example #32
    # Extract the location of features in each frame
    features = tp.batch(frames[startFrame:startFrame + nFrames],
                        blobSize,
                        minmass=minMass,
                        invert=False,
                        engine='numba',
                        characterize=False)

    #    pred = tp.predict.NearestVelocityPredict()
    # Link the features in each frame to make tracks
    tracks = tp.link_df(features, particleSize * 5, memory=5)
    #    tracks = pred.link_df(pd.concat(frames), 0.5)

    # Remove tracks shorter than 1 s (60 frames) in length
    tracks1 = tp.filter_stubs(tracks, 60)

    # Generate tracks from the Frames
    # For each unique particle store all the information corresponding to the particle

    nTracks = max(tracks1['particle']) + 1

    print('Total number of tracks in this dataset: {}'.format(nTracks))

    Tracks = []
    # Create a datastructure storing the track of each particle
    for ii in range(nTracks):

        particleFrames = tracks1[tracks1['particle'] == ii]['frame']
        particleX = tracks1[tracks1['particle'] == ii]['x']
        particleY = tracks1[tracks1['particle'] == ii]['y']
Example #33
	def get_53bp1_blob_mask(self):
		# If no mask ref file, use raw file automatically
		frames = nonempty_openfile1_or_openfile2(self.config.OUTPUT_PATH,
					self.config.MASK_53BP1_BLOB_NAME,
					self.config.ROOT_NAME+'-raw.tif')[list(self.config.TRANGE),:,:]

		# If regi params csv file exists, load it and do the registration.
		if osp.exists(self.config.OUTPUT_PATH + self.config.ROOT_NAME + '-regiData.csv'):
			regi_params_array_2d = pd.read_csv(self.config.OUTPUT_PATH +
							self.config.ROOT_NAME + '-regiData.csv').to_numpy()
			frames = apply_regi_params(frames, regi_params_array_2d)

		# # Boxcar denoise the frames
		# frames = frames / frames.max()
		# frames = img_as_ubyte(frames)
		# frames = filter_batch(frames, method='boxcar', arg=self.config.BOXCAR_RADIUS)

		# Get mask file and save it using 255 and 0
		imsave(self.config.OUTPUT_PATH + self.config.ROOT_NAME + '-tempFile.tif',
				frames)
		pims_frames = pims.open(self.config.OUTPUT_PATH + self.config.ROOT_NAME +
								'-tempFile.tif')

		blobs_df, det_plt_array = detect_blobs(pims_frames[0],
									min_sig=self.config.MASK_53BP1_BLOB_MINSIG,
									max_sig=self.config.MASK_53BP1_BLOB_MAXSIG,
									num_sig=self.config.MASK_53BP1_BLOB_NUMSIG,
									blob_thres=self.config.MASK_53BP1_BLOB_THRES,
									peak_thres_rel=self.config.MASK_53BP1_BLOB_PKTHRES_REL,
									r_to_sigraw=1.4,
									pixel_size=self.config.PIXEL_SIZE,
									diagnostic=True,
									pltshow=True,
									plot_r=False,
									truth_df=None)

		blobs_df, det_plt_array = detect_blobs_batch(pims_frames,
									min_sig=self.config.MASK_53BP1_BLOB_MINSIG,
									max_sig=self.config.MASK_53BP1_BLOB_MAXSIG,
									num_sig=self.config.MASK_53BP1_BLOB_NUMSIG,
									blob_thres=self.config.MASK_53BP1_BLOB_THRES,
									peak_thres_rel=self.config.MASK_53BP1_BLOB_PKTHRES_REL,
									r_to_sigraw=1.4,
									pixel_size=self.config.PIXEL_SIZE,
									diagnostic=False,
									pltshow=False,
									plot_r=False,
									truth_df=None)


		blobs_df = tp.link_df(blobs_df,
									search_range=self.config.MASK_53BP1_BLOB_SEARCH_RANGE,
									memory=self.config.MASK_53BP1_BLOB_MEMORY)
		blobs_df = tp.filter_stubs(blobs_df, self.config.MASK_53BP1_BLOB_TRAJ_LENGTH_THRES)
		blobs_df = blobs_df.reset_index(drop=True)

		masks_53bp1_blob = blobs_df_to_mask(frames, blobs_df)

		os.remove(self.config.OUTPUT_PATH + self.config.ROOT_NAME + '-tempFile.tif')

		return masks_53bp1_blob
Example #34
import pims
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import trackpy as tp
plt.ion()
# Optionally, tweak styles.
mpl.rc('figure',  figsize=(10, 6))
mpl.rc('image', cmap='gray')
aa = pims.ImageSequence('./frame_crop/*.jpg', as_grey=True)
# f = tp.locate(aa[0], 33, invert=True)
f= pd.read_csv("./trackpyResult/forceCSV.csv")
fig=plt.figure()
#fig=tp.annotate(f, aa[0])
#fig.figure.savefig("./trackpyResult/trackpyAnnotation.jpg")
#f = tp.batch(aa[:], 11, minmass=200, invert=True);
#f = tp.batch(aa[:], 11, invert=True);
fig, ax = plt.subplots()
t = tp.link_df(f, 5, memory=3)
t1 = tp.filter_stubs(t, 50)
print(t1)
t1.to_csv("./trackpyResult/t1.csv")
# Compare the number of particles in the unfiltered and filtered data.
print('Before:', t['particle'].nunique())
print('After:', t1['particle'].nunique())
#fig=plt.figure()
#fig=tp.mass_size(t1.groupby('particle').mean()); # convenience function -- just plots size vs. mass
#fig.figure.savefig("./trackpyResult/particle.jpg")
fig=plt.figure()
fig=tp.plot_traj(t1)
fig.figure.savefig("./trackpyResult/trajectoryI.jpg")
t2 = t1
fig=plt.figure()
fig=tp.annotate(t2[t2['frame'] == 0], aa[0]);
fig.figure.savefig("./trackpyResult/t2Annotation.jpg")