def filter(self, dataframe=None, store=True):
    # Retrieve the dataframe
    if dataframe is None:
        dataframe = self.spots
    # Connect positions together
    dataframe = tp.link(
        dataframe,
        self.search_range,
        memory=self.memory,
        adaptive_stop=self.adaptive_stop,
        adaptive_step=self.adaptive_step,
        neighbor_strategy=self.neighbor_strategy,
        link_strategy=self.link_strategy,
    )
    # Remove spurious trajectories
    if self.filter_stubs is not None:
        dataframe = tp.filtering.filter_stubs(dataframe,
                                              threshold=self.filter_stubs)
    # Regenerate the index
    dataframe = dataframe.reset_index(drop=True)
    # Store in the instance
    if store:
        self.tracks = deepcopy(dataframe)
    return dataframe
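# For reference outside the class above: the same trackpy call on a toy spot
# table. Parameter values here are illustrative, not the instance defaults.
import pandas as pd
import trackpy as tp

spots = pd.DataFrame({'x': [0.0, 1.0], 'y': [0.0, 0.5], 'frame': [0, 1]})
linked = tp.link(spots, search_range=10, memory=3,
                 adaptive_stop=2, adaptive_step=0.95,
                 neighbor_strategy='KDTree', link_strategy='auto')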
from typing import Optional, Union

import pandas as pd
import trackpy


def workerFunc(locData: Optional[pd.DataFrame], searchRange: float,
               memory: int) -> Union[pd.DataFrame, None]:
    """Perform tracking

    Parameters
    ----------
    locData
        Localization data for tracking
    searchRange
        `search_range` parameter to :py:func:`trackpy.link`
    memory
        `memory` parameter to :py:func:`trackpy.link`

    Returns
    -------
    Tracked data
    """
    if locData is None:
        return None
    if not locData.size:
        ret = locData.copy()
        ret["particle"] = []
        return ret
    trackpy.quiet()
    return trackpy.link(locData, search_range=searchRange, memory=memory)
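# A quick hand-check of the worker on a two-row localization table; the
# values are arbitrary, and trackpy's default column names x, y, frame are
# assumed.
locs = pd.DataFrame({"x": [10.0, 11.0], "y": [5.0, 5.4], "frame": [0, 1]})
tracked = workerFunc(locs, searchRange=2.0, memory=0)
# Both rows fall within searchRange of each other, so they share one value
# in the added "particle" column.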
import pandas as pd
import trackpy


def time_series_analysis(particles, max_dist=1, memory=3, properties=['area']):
    """
    Perform tracking of particles for time series data.

    Parameters
    ----------
    particles : Particle_list object.
    max_dist : int
        The maximum distance between the same particle in subsequent images.
    memory : int
        The number of frames to remember particles over.
    properties : list
        A list of particle properties to track over the time series.

    Returns
    -------
    Pandas DataFrame of trajectories.
    """
    rows = []
    for particle in particles.list:
        pd_dict = {
            'x': particle.properties['x']['value'],
            'y': particle.properties['y']['value']
        }
        for prop in properties:
            pd_dict[prop] = particle.properties[prop]['value']
        pd_dict['frame'] = particle.properties['frame']['value']
        rows.append(pd_dict)
    # DataFrame.append was removed in pandas 2.0; build from a list of dicts
    df = pd.DataFrame(rows, columns=['y', 'x'] + properties + ['frame'])
    t = trackpy.link(df, max_dist, memory=memory)
    return t
def test_link_memory(self):
    expected = pd.DataFrame(self.coords_link,
                            columns=self.pos_columns + ['frame'])
    expected['frame'] = expected['frame'].astype(int)  # np.int was removed in NumPy 1.24
    actual = tp.link(expected, memory=self.memory, **self.link_params)
    expected['particle'] = self.expected_link_memory
    assert_traj_equal(actual, expected)
def localize(video, method='tf', background=None, dark_count=31):
    '''
    Returns DataFrame of particle parameters in each frame
    of a video, linked with their trajectory index

    Args:
        video: video filename
    Keywords:
        method: detection method, either 'tf' or 'oat'
        background: background image for normalization
        dark_count: dark count of camera
    '''
    if method == 'tf':
        trk = tracker.tracker()
    # Create VideoCapture to read video
    cap = cv2.VideoCapture(video)
    # Initialize components to build overall dataset.
    frame_no = 0
    data = []
    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            break
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Normalize image
        if background is not None:
            image = (image - dark_count) / (background - dark_count)
        # Find features in each frame
        if method == 'tf':
            features = trk.predict(inflate(image))
        elif method == 'oat':
            features, circ = oat(image, frame_no)
        else:
            raise ValueError("method must be either 'oat' or 'tf'")
        for feature in features:
            # Build dataset over all frames.
            feature = np.append(feature, frame_no)
            data.append(feature)
        # Advance frame_no
        frame_no += 1
    cap.release()
    # Put data set in DataFrame and link
    result_df = pd.DataFrame(columns=['x', 'y', 'w', 'h', 'frame'],
                             data=data)
    linked_df = tp.link(result_df, search_range=5, pos_columns=['y', 'x'])
    return linked_df
def link(self, search_range: int = 5, memory: int = 2, **configs):
    # Make sure particles have been located
    if self.locations is None:
        print(' ! No locations found.')
        return
    configs['search_range'] = search_range
    configs['memory'] = memory
    # self.link_configs has the highest priority
    configs.update(self.link_configs)
    # Link locations
    self.trajectories = tp.link(self.locations, **configs)
    console.show_status('Linking completed. Configurations:')
    console.supplement(configs)
    self.effective_link_config = configs
    self._draw()
def get_link(locs, search_range, memory):
    '''
    Apply trackpy.link_df() (`trackpy`_) on localizations with given
    search_range and memory to get trajectories sorted by group and frame.
    All tracks of 10 frames or fewer are removed.

    Args:
        locs(pandas.DataFrame): Localizations as generated by `picasso.localize`_ as pandas.DataFrame
        search_range(int): Localizations within search_range (spatial) will be connected to tracks (see trackpy.link_df)
        memory(int): Localizations within memory (temporal) will be connected to tracks (see trackpy.link_df)

    Returns:
        pandas.DataFrame: Linked trajectories using trackpy.link().

        - ``group`` instead of ``particle`` column for `picasso.render`_ compatibility.
        - All trajectories with <= 10 localizations are already removed!
    '''
    ### Link locs
    link = tp.link(
        locs,
        search_range,
        memory=memory,
        link_strategy='hybrid',
    )

    ### Sort and rename
    print('Sorting by [group,frame] ... ')
    link.sort_values(by=['particle', 'frame'], ascending=True, inplace=True)
    link = link.rename(
        columns={'particle': 'group'})  # Rename for picasso compatibility

    ### Throw away tracks with 10 or fewer localizations
    link = drop_shorttracks(link)
    return link
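# drop_shorttracks is not defined in this snippet; a minimal sketch
# consistent with the docstring (drop every track of 10 or fewer
# localizations, applied after the rename to 'group') might look like this:
def drop_shorttracks(link, min_locs=10):
    # Keep only tracks with more than min_locs localizations
    counts = link.groupby('group').size()
    keep = counts[counts > min_locs].index
    return link[link['group'].isin(keep)].reset_index(drop=True)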
def link(self, search_range=15, memory=3):
    self.data.df = tp.link(self.data.df.reset_index(), search_range,
                           memory=memory).set_index('frame')
    self.data.save()
def link(self, *args, **kwargs):
    kwargs.update(self.linker_opts)
    return tp.link(*args, **kwargs)
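# Note that kwargs.update(self.linker_opts) runs last, so instance-level
# options silently override per-call keywords. A hypothetical host class
# (names are illustrative, not from the source):
import trackpy as tp

class Tracker:
    def __init__(self, **linker_opts):
        self.linker_opts = linker_opts  # these win over call-site keywords

    def link(self, *args, **kwargs):
        kwargs.update(self.linker_opts)
        return tp.link(*args, **kwargs)

tracker = Tracker(memory=3)
# tracker.link(features, 5, memory=0) would still link with memory=3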
import filehandling
from particletracking import dataframes
import trackpy as tp

# direc1 = filehandling.open_directory(initialdir='/media/data/Data')
# files1 = filehandling.get_directory_filenames(direc1+'/*.hdf5')
#
# direc2 = filehandling.open_directory(initialdir='/media/data/Data')
# files2 = filehandling.get_directory_filenames(direc2+'/*.hdf5')
#
# files = files1 + files2

files = ["/media/data/Data/FirstOrder/PhaseDiagram/FlatPlate2Feb2021/2000.hdf5"]

for file in files:
    with dataframes.DataStore(file) as data:
        d = data.df.copy()
        print(d.head())
        d = d.reset_index()
        print(d.head())
        d = tp.link(d, d.r.mean())
link_distance = tracking_setting['link_distance']
memory = tracking_setting['memory']
adaptive_step = tracking_setting['adaptive_step']
adaptive_stop = tracking_setting['adaptive_stop']

df_data_tracked = pd.DataFrame()
for pos in pos_list:
    print("tracking nuclei for Pos %d...." % int(pos))
    df_select = df_data.loc[df_data['Pos'] == pos].copy().reset_index(drop=True)
    df_select = tp.link(df_select, link_distance,
                        pos_columns=['cent_x', 'cent_y'],
                        t_column='frame', memory=memory,
                        adaptive_step=adaptive_step,
                        adaptive_stop=adaptive_stop)
    df_select = df_select.rename(columns={'particle': 'ID'})
    df_select = df_select.sort_values(['frame', 'ID'])
    df_select['ID'] = df_select['ID'] + 1  # to make it start from 1
    df_data_tracked = pd.concat((df_data_tracked, df_select))
    print("Done!\n")

df_data = df_data_tracked.reset_index(drop=True)
del df_data_tracked
del df_select
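# For reference, a minimal self-contained version of the per-position call
# above (toy coordinates are made up; the keyword usage matches):
import pandas as pd
import trackpy as tp

toy = pd.DataFrame({'cent_x': [0.0, 1.0], 'cent_y': [0.0, 0.0],
                    'frame': [0, 1]})
linked = tp.link(toy, 5, pos_columns=['cent_x', 'cent_y'], t_column='frame',
                 memory=1, adaptive_step=0.95, adaptive_stop=2)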
path = r'C:\Users\elpresidente_2\Desktop\MatlabPA14\Particle Tracking\cropped_movie'
frames = pims.open(path + r'\*.tif')
print(type(frames))

proc_frames = []
# First detect particles in each frame
for frame in frames:
    frame_ = cv2.GaussianBlur(frame, (5, 5), 0)
    frame = cv2.adaptiveThreshold(frame_, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 9, 2)
    proc_frames.append(frame)

loc = tp.batch(proc_frames, 13, invert=True, minmass=2000)  # Found minmass from histogram
t = tp.link(loc, 9, memory=4)
t1 = tp.filter_stubs(t, 20)

plt.figure()
tp.plot_traj(t1)
plt.show()

#tp.annotate(f, frames[0])
#fig, ax = plt.subplots()
#ax.hist(f['mass'], bins=20)
#ax.set_xlabel('mass')
#ax.set_ylabel('count')
#plt.show()
#f = tp.locate(th3, 13, invert=True, minmass = 2000)
#tp.annotate(f, frame)
dfn = os.path.join(output_dir, 'tracking.pickle')

if not os.path.exists(dfn):
    print("Tracking points...")
    import trackpy as tp

    # Open the 3D movie
    vol = open_3D_movie(mfn)

    # Identify the points in the volume
    data = tp.batch(vol, 5)
    # Note: the second parameter is the size in the frame. In this case,
    # 5 is approximately right, but you would need to tweak for a different
    # data set!

    # Link the particles into tracks
    data = tp.link(data, 10, memory=3)
    print(f'Found {data.particle.max()+1} particle tracks')

    # Find the physical coordinates. These will appear as the columns "xc", "yc",
    # and "zc" in the data frame *after* you run this command, and will be in the
    # physical units of the data (i.e. L and not N)
    vol.distortion.update_data_frame(data)

    # Save to a pickle file
    with open(dfn, "wb") as f:
        pickle.dump(data, f)
else:
    print(f"Found existing track data, delete {dfn} to regenerate...")
    with open(dfn, "rb") as f:
        data = pickle.load(f)
plt.figure()
plot_blobs(substack[i][:, :, 2], blobs)
plt.savefig(f'img_w_blobs_{i}.png')
plt.close()

#%%
all_blob_data = []
for i, each in tqdm.tqdm(enumerate(allblobs)):
    blob_locs = pd.DataFrame(data={'x': [], 'y': [], 'frame': []})
    blob_locs['x'] = each[:, 1]
    blob_locs['y'] = each[:, 0]
    blob_locs['frame'] = i
    all_blob_data.append(blob_locs)
allblob_data = pd.concat(all_blob_data).reset_index(drop=True)

#%% Link blob locations into tracks
linked = tp.link(allblob_data, search_range=30, memory=5)
filt_linked = tp.filter_stubs(linked)
by_pid = filt_linked.groupby('particle')

#%%
# Remove all tracks which are suspiciously stationary
def startend_travelled(track_df):
    if track_df.shape[0] < 2:
        return 0
    start_pos = track_df.loc[np.min(track_df.index), ['x', 'y']]
    end_pos = track_df.loc[np.max(track_df.index), ['x', 'y']]
    dist = np.sqrt(np.sum((end_pos - start_pos)**2))
    return dist
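#%%
# One way the helper above might be applied to drop the near-stationary
# tracks; the 10-pixel threshold is a hypothetical value, not from the source.
travel = by_pid.apply(startend_travelled)  # net displacement per particle
moving_ids = travel[travel > 10].index
moving_tracks = filt_linked[filt_linked['particle'].isin(moving_ids)]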
@author: MattiaV
"""
from MTM import matchTemplates
import cv2
from skimage import io
import matplotlib.pyplot as plt
from template_opt import Template90
from track import make_batch
import trackpy as tp

fileID = r"C:\Users\MattiaV\Desktop\università\Interships\DynamicsOfGranularShapes\ParticleTracking\Images\First frames\*.png"
images = io.imread_collection(fileID)

temp_draft1 = images[0][320:433, 520:650]
temp0 = temp_draft1[23:92, 28:105]
Temp = Template90(temp0)
list_template = Temp.create()

features_match = {'N_obj': 50, 'threshold': 0.55,
                  'method': cv2.TM_CCOEFF_NORMED, 'max_overlap': 0.22}
batch = make_batch(images, list_template, features_match)

t0 = tp.link(batch, 50, memory=3)
t1 = tp.filter_stubs(t0, 4)
# Compare the number of particles in the unfiltered and filtered data.
print('Before:', t0['particle'].nunique())
print('After:', t1['particle'].nunique())

plt.figure()
tp.plot_traj(t1);
v = pims.ImageSequence(impath)  # take reader that provides uint8!
assert np.issubdtype(v.dtype, np.uint8)
v0 = tp.invert_image(v[0])
v0_bp = tp.bandpass(v0, lshort=1, llong=9)

expected_find = tp.grey_dilation(v0, separation=9)
expected_find_bandpass = tp.grey_dilation(v0_bp, separation=9)
expected_refine = tp.refine_com(v0, v0_bp, radius=4,
                                coords=expected_find_bandpass)
expected_refine = expected_refine[expected_refine['mass'] >= 140]
expected_refine_coords = expected_refine[pos_columns].values
expected_locate = tp.locate(v0, diameter=9, minmass=140)
expected_locate_coords = expected_locate[pos_columns].values

df = tp.locate(v0, diameter=9)
df = df[(df['x'] < 64) & (df['y'] < 64)]
expected_characterize = df[pos_columns + char_columns].values

f = tp.batch(tp.invert_image(v), 9, minmass=140)
f_crop = f[(f['x'] < 320) & (f['x'] > 280) &
           (f['y'] < 280) & (f['y'] > 240)]
f_linked = tp.link(f_crop, search_range=5, memory=0)
f_linked_memory = tp.link(f_crop, search_range=5, memory=2)
link_coords = f_linked[pos_columns + ['frame']].values
expected_linked = f_linked['particle'].values
expected_linked_memory = f_linked_memory['particle'].values

np.savez_compressed(npzpath, expected_find, expected_find_bandpass,
                    expected_refine_coords, expected_locate_coords,
                    link_coords, expected_linked, expected_linked_memory,
                    expected_characterize)
def analyze(self, plot_gif=False):
    self.drifts = []
    self.v_drift_mag = []
    self.D_constants = []
    self.D_constants2 = []
    self.msd_slope = []
    self.msd_intercept = []
    self.mu_hats = []
    self.ed = []
    self.em = []
    self.frames = []
    self.dataframes = []
    for i, path in enumerate(self.SXM_PATH):
        frames = SXMReader(path)
        self.frames.append(frames)
        self.NM_PER_PIXEL = frames.meters_per_pixel * 1e9
        molecule_size, min_mass, max_mass, separation, min_size, max_ecc, \
            adaptive_stop, search_range, _ = self.PARAMS[i]
        f = tp.batch(frames, molecule_size, minmass=min_mass,
                     separation=separation)
        t = tp.link(f, search_range=search_range, adaptive_stop=adaptive_stop)
        t1 = t[((t['mass'] > min_mass) & (t['size'] > min_size) &
                (t['ecc'] < max_ecc)) & (t['mass'] < max_mass)]
        t2 = tp.filter_stubs(t, 3)
        # Compare the number of particles in the unfiltered and filtered data.
        print('Before:', t['particle'].nunique())
        print('After:', t2['particle'].nunique())
        if plot_gif:
            moviename = "{}-{}".format(min(self.fileranges[i]),
                                       max(self.fileranges[i]))
            singlemoviefolder = self.MOVIE_FOLDER + moviename + "/"
            if not os.path.exists(singlemoviefolder):
                os.makedirs(singlemoviefolder)
            mpl.rcParams.update({'font.size': 14, 'font.weight': 'bold'})
            mpl.rc('image', origin='lower')
            mpl.rc('text', usetex=False)
            mpl.rc('text', color='orange')
            fns = []
            for j, frame in enumerate(frames):
                fig = plt.figure(figsize=(5, 5))
                tp.plot_traj(t2[(t2['frame'] <= j)], superimpose=frames[j],
                             label=True)
                fn = singlemoviefolder + "Image_{}.png".format(
                    self.fileranges[i][j])
                fig.savefig(fn)
                fns.append(fn)
                ax = plt.gca()                           # get the axis
                ax.set_ylim(ax.get_ylim()[::-1])         # invert the axis
                ax.xaxis.tick_top()                      # and move the X-Axis
                ax.yaxis.set_ticks(np.arange(0, 16, 1))  # set y-ticks
                ax.yaxis.tick_left()                     # remove right y-Ticks
                plt.clf()
            mpl.rc('text', color='black')
            images = []
            for fn in fns:
                images.append(imageio.imread(fn))
            imageio.mimsave(singlemoviefolder + moviename + '.gif', images,
                            duration=0.5)
            self._cleanup_png(singlemoviefolder)
        # Compute drifts
        d = tp.compute_drift(t2)
        d.loc[0] = [0, 0]
        t3 = t2.copy()
        # Storing drifts
        self.drifts.append(d)
        # Method 1 of calculating D: variance of all displacements of Delta_t=1
        displacements = self._calculate_displacements(t3)
        self.D_constants.append(
            (displacements.dx.var() + displacements.dy.var()) / 4)  # r^2 = x^2 + y^2 = 2Dt + 2Dt
        self.mu_hats.append(np.mean(displacements[['dx', 'dy']], axis=0))
        # Method 2 of calculating D: linear fit to MSD
        em = tp.emsd(t3, frames.meters_per_pixel * 1e9, self.DIFFUSION_TIME,
                     max_lagtime=len(frames), detail=True)
        self.em.append(em)
        self.ed.append([em['<x>'], em['<y>']])
        result = linregress(em.index[:-8] * self.DIFFUSION_TIME,
                            em['msd'][:-8])
        self.msd_slope.append(result.slope)
        self.msd_intercept.append(result.intercept)
        self.D_constants2.append(result.slope / 4)
        # Store dataframe for future analysis
        self.dataframes.append(t3)
    self.v_drift_mag = np.linalg.norm(self.mu_hats, 2, axis=1)
def linking_trackpy(features, field_in, dt, dxy, v_max=None, d_max=None,
                    d_min=None, subnetwork_size=None, memory=0, stubs=1,
                    time_cell_min=None, order=1, extrapolate=0,
                    method_linking='random', adaptive_step=None,
                    adaptive_stop=None, cell_number_start=1):
    """
    Function to perform the linking of features in trajectories

    Parameters:
    features:        pandas.DataFrame
                     Detected features to be linked
    dt:              float
                     time resolution of tracked features
    dxy:             float
                     grid spacing of input data
    v_max:           float
                     speed at which features are allowed to move
    memory:          int
                     number of output timesteps features allowed to vanish for
                     to be still considered tracked
    subnetwork_size: int
                     maximum size of subnetwork for linking
    method_linking:  str('predict' or 'random')
                     flag choosing method used for trajectory linking
    """
    # from trackpy import link_df
    import trackpy as tp
    from copy import deepcopy
    # from trackpy import filter_stubs
    # from .utils import add_coordinates

    # calculate search range based on timestep and grid spacing
    if v_max is not None:
        search_range = int(dt * v_max / dxy)
    # calculate search range based on maximum displacement and grid spacing
    if d_max is not None:
        search_range = int(d_max / dxy)
    # enforce a minimum search range based on grid spacing
    if d_min is not None:
        search_range = max(search_range, int(d_min / dxy))

    if time_cell_min:
        stubs = np.floor(time_cell_min / dt) + 1

    logging.debug('stubs: ' + str(stubs))
    logging.debug('start linking features into trajectories')

    # If subnetwork size given, set maximum subnet size
    if subnetwork_size is not None:
        tp.linking.Linker.MAX_SUB_NET_SIZE = subnetwork_size

    # deep copy to preserve features field:
    features_linking = deepcopy(features)

    if method_linking == 'random':
        # link features into trajectories:
        trajectories_unfiltered = tp.link(features_linking,
                                          search_range=search_range,
                                          memory=memory,
                                          t_column='frame',
                                          pos_columns=['hdim_2', 'hdim_1'],
                                          adaptive_step=adaptive_step,
                                          adaptive_stop=adaptive_stop,
                                          neighbor_strategy='KDTree',
                                          link_strategy='auto')
    elif method_linking == 'predict':
        pred = tp.predict.NearestVelocityPredict(span=1)
        trajectories_unfiltered = pred.link_df(
            features_linking,
            search_range=search_range,
            memory=memory,
            pos_columns=['hdim_1', 'hdim_2'],
            t_column='frame',
            neighbor_strategy='KDTree',
            link_strategy='auto',
            adaptive_step=adaptive_step,
            adaptive_stop=adaptive_stop
            # copy_features=False, diagnostics=False,
            # hash_size=None, box_size=None, verify_integrity=True,
            # retain_index=False
        )
    else:
        raise ValueError('method_linking unknown')

    # Filter trajectories to exclude short trajectories that are likely to be spurious
    # trajectories_filtered = filter_stubs(trajectories_unfiltered,threshold=stubs)
    # trajectories_filtered=trajectories_filtered.reset_index(drop=True)

    # Reset particle numbers from the arbitrary numbers assigned at the end of
    # feature detection and linking to consecutive cell numbers
    # ('particle' is dropped below once the cell numbers are assigned).
    trajectories_unfiltered['cell'] = None
    for i_particle, particle in enumerate(
            pd.Series.unique(trajectories_unfiltered['particle'])):
        cell = int(i_particle + cell_number_start)
        trajectories_unfiltered.loc[
            trajectories_unfiltered['particle'] == particle, 'cell'] = cell
    trajectories_unfiltered.drop(columns=['particle'], inplace=True)

    trajectories_bycell = trajectories_unfiltered.groupby('cell')
    for cell, trajectories_cell in trajectories_bycell:
        logging.debug("cell: " + str(cell))
        logging.debug("feature: " + str(trajectories_cell['feature'].values))
        logging.debug("trajectories_cell.shape[0]: " +
                      str(trajectories_cell.shape[0]))

        if trajectories_cell.shape[0] < stubs:
            logging.debug("cell " + str(cell) + " is a stub (" +
                          str(trajectories_cell.shape[0]) +
                          "), setting cell number to NaN..")
            trajectories_unfiltered.loc[
                trajectories_unfiltered['cell'] == cell, 'cell'] = np.nan

    trajectories_filtered = trajectories_unfiltered

    # Interpolate to fill the gaps in the trajectories
    # (left from allowing memory in the linking)
    trajectories_filtered_unfilled = deepcopy(trajectories_filtered)
    # trajectories_filtered_filled=fill_gaps(trajectories_filtered_unfilled,order=order,
    #                                        extrapolate=extrapolate,frame_max=field_in.shape[0]-1,
    #                                        hdim_1_max=field_in.shape[1],hdim_2_max=field_in.shape[2])
    # add coordinates from input fields to output trajectories (time, dimensions)
    # logging.debug('start adding coordinates to trajectories')
    # trajectories_filtered_filled=add_coordinates(trajectories_filtered_filled,field_in)
    # add time coordinate relative to cell initiation:
    # logging.debug('start adding cell time to trajectories')
    trajectories_filtered_filled = trajectories_filtered_unfilled
    trajectories_final = add_cell_time(trajectories_filtered_filled)

    # add coordinate to raw features identified:
    logging.debug('start adding coordinates to detected features')
    logging.debug('feature linking completed')

    return trajectories_final
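# For reference: the 'predict' branch above relies on trackpy's prediction
# framework. A minimal, self-contained sketch of the same call on toy data
# (toy values are made up; column names mirror the pos_columns used above):
import pandas as pd
import trackpy as tp

toy = pd.DataFrame({'hdim_1': [0.0, 1.0, 2.0],
                    'hdim_2': [0.0, 0.0, 0.0],
                    'frame': [0, 1, 2]})
pred = tp.predict.NearestVelocityPredict(span=1)
linked = pred.link_df(toy, search_range=2,
                      pos_columns=['hdim_1', 'hdim_2'], t_column='frame')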
def Link(searchRange, memory, minFrames):
    # f: features DataFrame from the enclosing scope (not passed as a parameter)
    t = tp.link(f, search_range=searchRange, memory=memory)
    t1 = tp.filter_stubs(t, minFrames)
    plt.figure()
    tp.plot_traj(t1, label=True)
    return t1
median = np.median(temp, axis=2)

@pims.pipeline
def removeBack(frame, median):
    frame = frame - median
    return frame

frames = removeBack(frames, median)
break  # exits an enclosing loop whose header is not shown in this excerpt

#frames = cropImage(frames)
#fig.savefig(SavePath + '/Line.jpg')

f = tp.batch(frames, estimateFeatureSize, minmass=minMass)
t = tp.link(f, 60, memory=80)
t1 = tp.filter_stubs(t, 10)
tp.plot_traj(t1)

Ntrajs = np.max(np.array(t1['particle'])) + 1
minMoveDistance = 1
print('there are %s trajectories' % Ntrajs)

t2 = t1[0:0]
for i in range(Ntrajs):
    tNew = t1[t1['particle'] == i]
    if len(tNew) < 30:
        continue
    #distData = tp.motion.msd(tNew,1,1,len(tNew))
    #dist = distData.iloc[-1,:]['msd']
def link(self, *args, **kwargs):
    kwargs.update(self.linker_opts)
    # args[0]: features DataFrame; args[1]: search range, normalized to a
    # 2-tuple before being passed on to trackpy
    return tp.link(args[0], validate_tuple(args[1], 2), *args[2:], **kwargs)
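# For context: trackpy's validate_tuple(value, ndim) (from trackpy.utils)
# expands a scalar into a per-dimension tuple, so the wrapper above accepts
# either a scalar or a per-axis search range for 2D data.
from trackpy.utils import validate_tuple

print(validate_tuple(5, 2))       # (5, 5)
print(validate_tuple((3, 7), 2))  # (3, 7)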