Example #1
    def test_no_drift(self):
        N = 10
        expected = DataFrame({'x': np.zeros(N), 'y': np.zeros(N)}).iloc[1:]
        expected = expected.astype('float')
        expected.index.name = 'frame'
        expected.columns = ['x', 'y']
        # ^ no drift measured for Frame 0

        actual = tp.compute_drift(self.dead_still)
        assert_frame_equal(actual, expected[['y', 'x']])

        actual_rolling = tp.compute_drift(self.dead_still, smoothing=2)
        assert_frame_equal(actual_rolling, expected[['y', 'x']])
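For reference, a minimal self-contained sketch (with assumed toy data, not the test fixture above) of what these tests exercise: tp.compute_drift takes a linked trajectory DataFrame with x, y, frame and particle columns and returns the cumulative per-frame drift, with no entry for frame 0.

import numpy as np
import pandas as pd
import trackpy as tp

# Two stationary particles over 5 frames (hypothetical stand-in for dead_still).
traj = pd.DataFrame({'x': np.zeros(10),
                     'y': np.zeros(10),
                     'frame': np.tile(np.arange(5), 2),
                     'particle': np.repeat([0, 1], 5)})
drift = tp.compute_drift(traj)
print(drift)  # all-zero cumulative drift, indexed by frames 1..4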
Example #2
    def test_no_drift(self):
        N = 10
        expected = DataFrame({'x': np.zeros(N), 'y': np.zeros(N)}).iloc[1:]
        expected = expected.astype('float')
        expected.index.name = 'frame'
        expected.columns = ['x', 'y']
        # ^ no drift measured for Frame 0

        actual = tp.compute_drift(self.dead_still)
        assert_frame_equal(actual, expected)

        # Small random drift
        actual = tp.compute_drift(self.many_walks)
        assert_frame_equal(actual, expected)
Example #3
def GlobalEstimation(t_drift, drift_smoothing_frames):
    """ estimates the drift for all particles in a frame at once,
    makes use of trackpy's compute_drift function
    
    Attention: This ignores laminar flow, but needs fewer frames (and thus time) 
               to get a good estimation.
    """
    nd.logger.info("Mode: global drift correction (frame per frame)")
            
    # calculate the cumulative (!) overall drift (e.g. drift of the setup or flow of the particles)
    my_drift = tp.compute_drift(t_drift, drift_smoothing_frames) 
    
    # There is a bug in trackpy: if a frame contains no particle, it sometimes
    # does not calculate the drift for the next frame that does contain one.
    # So a small workaround here:
    
    # get a list of all frames
    full_index = t_drift.frame.sort_values().unique()

    # interpolate the missing frames (the ones without a particle in it)
    my_drift = my_drift.reindex(full_index)
    my_drift = my_drift.interpolate(method = 'linear')          

    # subtract overall drift from trajectories
    t_no_drift = tp.subtract_drift(t_drift.copy(), my_drift) 

    t_no_drift = t_no_drift.drop(columns = "frame").reset_index()

    return t_no_drift, my_drift
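The reindex/interpolate workaround above can be illustrated without trackpy; a small sketch with made-up frame numbers and drift values:

import numpy as np
import pandas as pd

# Drift as compute_drift might return it when frames 3 and 4 contained no particles.
my_drift = pd.DataFrame({'x': [0.0, 0.1, 0.4, 0.5],
                         'y': [0.0, 0.0, 0.1, 0.1]},
                        index=pd.Index([1, 2, 5, 6], name='frame'))

full_index = np.arange(1, 7)                      # every frame of the video
my_drift = my_drift.reindex(full_index)           # missing frames become NaN rows
my_drift = my_drift.interpolate(method='linear')  # fill them by linear interpolation
print(my_drift)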
Example #4
    def test_constant_drift(self):
        N = 10
        expected = DataFrame({'x': np.arange(N), 'y': np.zeros(N)}).iloc[1:]
        expected = expected.astype('float')
        expected.index.name = 'frame'
        expected.columns = ['x', 'y']

        actual = tp.compute_drift(self.steppers)
        assert_frame_equal(actual, expected)
Example #5
def linkTrajectories(circles_tp, removeDrift=False):

    trajectories = tp.link_df(circles_tp, 5, memory=10)
    if removeDrift:
        drift = tp.compute_drift(trajectories)
        trajectories = tp.subtract_drift(trajectories.copy(), drift)

    return trajectories
    """
Example #6
def generateDriftPlots():

    directory = os.fsencode("../csv_raw/csv_raw_4")

    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        if filename.endswith(".csv"):
            # pandas.DataFrame.from_csv was removed; use read_csv instead
            df = pd.read_csv("../csv_raw/csv_raw_4/" + filename, index_col=0)
            x = tp.compute_drift(df)
            plt.figure()
            filename = filename.split('.')[0]
            plt.title(str(filename))
            plt.xlabel('Frame Number')
            plt.ylabel('Pixels')
            plt.plot(x)
Example #7
    def __init__(self, trajectories_dataframe, particle_id=None):
        self.trajectories = trajectories_dataframe
        self.filtered = self.__filter_trajectories()
        self.drift = tp.compute_drift(self.filtered)

        self.disp_x = []
        self.disp_y = []

        for i in range(1, len(self.drift.x.values)):
            self.disp_x.append(self.drift.x.values[i] - self.drift.x.values[i - 1])
            self.disp_y.append(self.drift.y.values[i] - self.drift.y.values[i - 1])

        if particle_id is not None:
            self.single_trajectory = self.__get_single_trajectory(particle_id)
        else:
            self.single_trajectory = self.__get_first_single_trajectory()
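The displacement loop above can also be written with pandas' diff(); a sketch on toy cumulative drift (the x/y column names match compute_drift's output):

import pandas as pd

# Toy cumulative drift, as tp.compute_drift would return it (indexed by frame).
drift = pd.DataFrame({'x': [0.0, 0.2, 0.5, 0.9],
                      'y': [0.0, 0.1, 0.1, 0.2]},
                     index=pd.Index([1, 2, 3, 4], name='frame'))

disp = drift.diff().dropna()   # per-frame displacement, no explicit loop
disp_x = disp['x'].tolist()    # ~[0.2, 0.3, 0.4]
disp_y = disp['y'].tolist()    # ~[0.1, 0.0, 0.1]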
Example #8
def frames2coords_cor(frames,params_locate_start={'diameter':11,'minmass_percentile':92},
                      params_filter={},
                      params_link_df={},
                     out_fh=None,
                     params_msd={},
                      subtract_drift=False,
                      force=False):
    params_locate=get_params_locate(frames[0],out_fh=out_fh,**params_locate_start)
    print(params_locate)
    logging.info('getting coords')
    t_flt=frames2coords(frames=frames,out_fh=out_fh,
                        params_locate=params_locate,params_msd=params_msd,params_link_df=params_link_df,
                        force=force,**params_filter)        
    if subtract_drift:
        d = tp.compute_drift(t_flt)
        t_cor = tp.subtract_drift(t_flt, d)
        return t_cor
    else:
        return t_flt        
Example #9
def nd2msd(nd_fh):
    # print nd_fh
    frames=pims.ND2_Reader(nd_fh)
    logging.info('number of image dimensions = %d' % len(np.shape(frames)))
    if len(np.shape(frames))==4:
        frames = average_z(frames)
    threshold=np.percentile(frames,75)
    f_batch = tp.batch(frames,diameter=11,threshold=threshold)

    t = tp.link_df(f_batch, search_range=11, memory=3)
    t_flt = tp.filter_stubs(t, 3*int(len(frames)/4))
    try:
        d = tp.compute_drift(t_flt)
        t_cor = tp.subtract_drift(t_flt, d)
    except Exception:
        t_cor = t_flt
        logging.info("drift correction failed; using uncorrected trajectories")
    # plt.figure()
    # tp.plot_traj(t_flt)
    # plt.figure()
    # d.plot()
    imsd=tp.imsd(t_cor,0.1,0.2, max_lagtime=100, statistic='msd')
    emsd=tp.emsd(t_cor,0.1,0.2, max_lagtime=100)
    return imsd,emsd
Example #10
def DriftCorrection(t_drift, ParameterJsonFile, Do_transversal_drift_correction = None,
                    drift_smoothing_frames = None, rolling_window_size = None,
                    min_particle_per_block = None, min_tracking_frames = None,
                    PlotGlobalDrift = False, PlotDriftAvgSpeed = False,
                    PlotDriftTimeDevelopment = False, PlotDriftFalseColorMapFlow = False,
                    PlotDriftVectors = False, PlotDriftFalseColorMapSpeed = False,
                    PlotDriftCorrectedTraj = False):
    
    """
    Calculate and remove overall drift from trajectories
    
    The drift needs to be removed, because the entire movement consists of brownian motion and drift
    In order to measure the brownian motion, the drift needs to be calculated and subtracted
    
    There are currently three options to choose from
    1) No drift correction - this is dangerous. However, if just a few particles are tracked the 
    average drift is most the particles movement and thus the trajectory vanishes!
    
    2) Global Drift
    Calculated the drift of all particles between neighbouring frames
    
    3) Transversal drift corretion
    Splits the fiber in several "subfibers". Each of them is treated independent. This is motivated by the idea of laminar
    flow, where particles on the side have a lower current than the ones in the middle
    However this method requires a lot of particles and makes sense for small fiber diameters where laminar flow is
    significant.
    
    """
    settings = nd.handle_data.ReadJson(ParameterJsonFile)
    
    ApplyDriftCorrection = settings["Drift"]["Apply"]    
    
    if ApplyDriftCorrection == 0:
        t_no_drift = t_drift
        
    else:
        
        if settings["Help"]["Drift"] == "auto":
            num_particles_per_frame = t_drift.groupby("frame")["particle"].count().mean()

            nd.ParameterEstimation.Drift(ParameterJsonFile, num_particles_per_frame)

        
        
        Do_transversal_drift_correction = settings["Drift"]["Do transversal drift correction"]    
        drift_smoothing_frames          = settings["Drift"]["Drift smoothing frames"]    
        rolling_window_size             = settings["Drift"]["Drift rolling window size"]    
        min_particle_per_block          = settings["Drift"]["Min particle per block"]    
        min_tracking_frames             = settings["Link"]["Min_tracking_frames"]



        if Do_transversal_drift_correction == False:
            print('Mode: global drift correction')
            # That's not to be used if y-depending correction (next block) is performed!
            
            # Attention: Strictly this might be wrong:
            # Drift might be different along y-positions of channel.
            # It might be more appropriate to divide into subareas and correct for drift individually there
            # That's done if Do_transversal_drift_correction==1
            my_drift = tp.compute_drift(t_drift, drift_smoothing_frames) # calculate the overall drift (e.g. drift of setup or flow of particles)
            """
            There is a bug in trackpy: if a frame contains no particle, it sometimes
            does not calculate the drift for the next frame that does contain one.
            So a small workaround here:
            """

            full_index = t_drift.frame.sort_values().unique()
#            full_index = t_drift.index.unique()
            # oldfull_index = t_drift.sort_values("frame").frame.unique()

            my_drift = my_drift.reindex(full_index)
            my_drift = my_drift.interpolate(method = 'linear')          

            t_no_drift = tp.subtract_drift(t_drift.copy(), my_drift) # subtract overall drift from trajectories (creates new dataset)

            t_no_drift = t_no_drift.drop(columns = "frame").reset_index()
                
#            my_a = t_drift[t_drift.particle==103].x
#            my_b = t_no_drift[t_no_drift.particle==103].x
#            
#            plt.plot(my_drift,'o')
#            plt.plot(my_a,'x')
#            plt.plot(my_b,'x')
#            plt.plot(my_a - my_b,'x')

            
            if PlotGlobalDrift == True:
                nd.visualize.PlotGlobalDrift(my_drift) # plot the calculated drift
        
        
        else:
            print('Mode: transversal correction')
            
            # Y-Depending drift-correction
            # RF: Creation of y-sub-zones and calculation of drift
            # SW 180717: Subtraction of drift from trajectories
        
            
            # how many particles are needed to perform a drift correction
            #min_particle_per_block = 40
            
        #    # use blocks above and below for averaging (more particles make drift correction better)
        #    # e.g. 2 means y subarea itself and the two above AND below
        #    rolling_window_size = 5
        
            average_over_total_block = 2 * rolling_window_size + 1
              
            # sort y values to have in each sub area the same amount of particles
            all_y_sorted = t_drift.y.values.copy()
            all_y_sorted.sort()
            
            y_min = all_y_sorted[0];   # min y of tracked particle
            y_max = all_y_sorted[-1];   # max y of tracked particle
            num_data_points = len(all_y_sorted)
            
            # total number of captured frames
            num_frames = t_drift.index.max() - t_drift.index.min() + 1
            
            # This is difficult to explain:
            # we have num_data_points data points and want to split them such that each
            # y sub-area contains min_particle_per_block particles in each of the
            # num_frames frames. Because of the averaging with neighbouring areas, the
            # effective number is raised by average_over_total_block.
            # Start: distribute num_data_points over the number of frames and blocks.
            number_blocks = int(num_data_points / min_particle_per_block / num_frames * average_over_total_block)
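            # Worked example with made-up numbers: 60000 localizations,
            # min_particle_per_block = 20, 300 frames and rolling_window_size = 2
            # (so average_over_total_block = 5) give
            # number_blocks = int(60000 / 20 / 300 * 5) = 50 blocks.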
            
            
            #sub_y = np.linspace(y_min,y_max,number_blocks+1)
            sub_y = np.zeros(number_blocks+1)
            
            for x in range(0,number_blocks):
                use_index = int(num_data_points * (x / number_blocks))
                sub_y[x] = all_y_sorted[use_index]
            sub_y[-1] = y_max
            
            #average y-range for later
            y_range = (sub_y[:-1] + sub_y[1:]) / 2
            
            # delete variable to start again
            if 'calc_drift' in locals():
                del calc_drift 
                del total_drift
                total_drift = pd.DataFrame(columns = ['y','x','frame'])
                calc_drift_diff = pd.DataFrame()
    
            # Creating a copy of t1 which will contain a new column with each values ysub-position
            t_drift_ysub = t_drift.copy()
            t_drift_ysub['ysub']=np.nan # Defining values to nan
                
            for x in range(0,number_blocks):
                print(x)
                   
                # calc which subareas of y are in the rolling window.
                sub_y_min = x - rolling_window_size
                if sub_y_min  < 0:
                    sub_y_min = 0
                
                sub_y_max = x + 1 + rolling_window_size
                if sub_y_max  > number_blocks:
                    sub_y_max = number_blocks;
                    
                # select which particles are in the current subarea
                use_part = (t_drift['y'] >= sub_y[sub_y_min]) & (t_drift['y'] < sub_y[sub_y_max])
                use_part_subtract = (t_drift['y'] >= sub_y[x]) & (t_drift['y'] < sub_y[x+1]) 
                
                # get their indices
                use_part_index = np.where(np.array(use_part)==True)   # find index of elements true
                
                # WHAT IS THIS VARIABLE ACTUALLY GOOD FOR?
                use_part_subtract_index = np.where(np.array(use_part_subtract)==True)
                
                # Writing x as ysub into copy of t1. That's needed to treat data differently depending on y-sub-position
                # Python 3.5 t1_ysub['ysub'].iloc[use_part_subtract_index]=x # I believe that's not an elegant way of doing it. Maybe find a better approach       
                t_drift_ysub.loc[use_part,'ysub'] = x # RF 180906
                
        
                
                # how many particles are in each frames
                use_part_index = list(use_part_index)[0]
                use_part_subtract_index = list(use_part_subtract_index)[0]
                num_particles_block = len(use_part_index)
                    
                # check if drift_smoothing_frames is not longer than the video is long
                num_frames = settings["ROI"]["frame_max"] - settings["ROI"]["frame_min"] + 1
                if num_frames < drift_smoothing_frames:
                    sys.exit("Number of frames is smaller than drift_smoothing_frames")
    
    #            raise NameError('HiThere')
                # make the drift correction with the subframe
                calc_drift_y = tp.compute_drift(t_drift.iloc[use_part_index], drift_smoothing_frames) # calculate the drift of this y block
                calc_drift_y_diff=calc_drift_y.diff(periods=1).fillna(value=0)
                calc_drift_y_diff['ysub']=x
                    
                calc_drift_y['frame'] = calc_drift_y.index.values
                calc_drift_y['y_range'] = y_range[x]
            
                # calculate entire drift with starting and end position
                start_pos = calc_drift_y.set_index('y_range')[['y', 'x', 'frame']].iloc[[0],:]
                end_pos = calc_drift_y.set_index('y_range')[['y', 'x', 'frame']].iloc[[-1],:]
                my_total = end_pos - start_pos
                my_total ['num_particles'] = num_particles_block
                
                if x == 0:
                    calc_drift = calc_drift_y
                    total_drift = my_total
                    calc_drift_diff = calc_drift_y_diff # that's going to be the look-up for the drift, depending on y-sub
                    
                else:
                    calc_drift = pd.concat([calc_drift, calc_drift_y])  #prepare additional index
                    total_drift = pd.concat([total_drift, my_total])
                    calc_drift_diff = pd.concat([calc_drift_diff,calc_drift_y_diff])
            
            # to distinguish that we're not looking at positions but the difference of positions
            calc_drift_diff=calc_drift_diff.rename(columns={'x':'x_diff1', 'y':'y_diff1'}) 
            
            # Adding frame as a column
            calc_drift_diff['frame']=calc_drift_diff.index 
            
            # Indexing by y-sub-area and frame
            calc_drift_diff=calc_drift_diff.set_index(['ysub','frame']) 
            
            # Adding frame as a column to particle data
            t_drift_ysub['frame']=t_drift_ysub.index 
            
            # Indexing particle-data analogously to drift-lookup
            t_drift_ysub=t_drift_ysub.set_index(['ysub','frame']) 
            
            # Adding drift-lookup into particle data, using frame and ysub
            t_drift_ysub_diff=pd.merge(t_drift_ysub,calc_drift_diff, left_index=True, right_index=True, how='inner') 
            
            # Releasing frame from index -> allows to sort easier by frame
            t_drift_ysub_diff=t_drift_ysub_diff.reset_index('frame') 
            
            cumsums_x=t_drift_ysub_diff.sort_values(['particle','frame']).groupby(by='particle')['x_diff1'].cumsum() 
            
            # Calculating particle history in x direction:
            # sorting by particle first, then frame, grouping then by particle and calculating the cumulative sum of displacements
            cumsums_y=t_drift_ysub_diff.sort_values(['particle','frame']).groupby(by='particle')['y_diff1'].cumsum()
            # same in y-direction
            
             # Sorting particle data in the same way
            t_no_drift_ysub_diff_sort=t_drift_ysub_diff.sort_values(['particle','frame'])
            
             # UNCERTAIN: + or - ?????
            t_no_drift_ysub_diff_sort['x_corr']=t_drift_ysub_diff.sort_values(['particle','frame'])['x']-cumsums_x
            
            # subtracting drift-history for each particle
             # UNCERTAIN: + or - ?????
            t_no_drift_ysub_diff_sort['y_corr']=t_drift_ysub_diff.sort_values(['particle','frame'])['y']-cumsums_y
            # same in y-direction
            
            #tm_sub=t1_ysub_diff_sort.copy()
            # just giving a more descriptive name to particle data
            t_no_drift_sub = t_no_drift_ysub_diff_sort 
            
            # dropping axes that wouldn't be needed any longer
            t_no_drift_sub = t_no_drift_sub.drop(['x', 'y', 'x_diff1', 'y_diff1'], axis=1) 
            
            #tm_sub=tm_sub.rename(columns={'x':'x', 'y':'y'}) 
            # renaming the corrected position into original names to keep the remaining code working with it
            t_no_drift_sub = t_no_drift_sub.rename(columns={'x_corr':'x', 'y_corr':'y'}) 
            
            # Bringing tm_sub back into a format that later parts of the code need to work with it
            t_no_drift_sub_store=t_no_drift_sub.copy()
            
            # Forgetting about ysub - which isn't needed anymore - and making frame the only index again
            t_no_drift_sub.set_index('frame', drop=True, inplace=True) 
            
            # Sorting by frame
            t_no_drift_sub=t_no_drift_sub.sort_index() 
            
            # Adding frame as a column
            t_no_drift_sub['frame'] = t_no_drift_sub.index 
            
            #reindex https://stackoverflow.com/questions/25122099/move-column-by-name-to-front-of-table-in-pandas
            cols = t_no_drift_sub.columns.tolist()
            cols.insert(0, cols.pop(cols.index("y")))
            cols.insert(0, cols.pop(cols.index("x")))
            
            t_no_drift_sub = t_no_drift_sub.reindex(columns= cols)# Ordering as needed later
            
            # Set this, if y-depending-drift-correction is to be used
            t_no_drift = t_no_drift_sub 
            
    #        t_no_drift = tp.filter_stubs(t_no_drift, min_tracking_frames) 
            t_no_drift = t_no_drift.sort_values('frame')
            
            # insert y_range
            # total_drift.index = y_range RF180906 is that needed?
                
            # set two new indices - first frame, then y_range
            calc_drift = calc_drift.set_index(['frame'])
            
            # calc velocity as the derivative of the drift,
            # averaged over avg_frames frames for display
            avg_frames = 30
            calc_drift[['velocity_y', 'velocity_x','new_y_range']] = calc_drift[['y','x','y_range']].diff(avg_frames)/avg_frames
            
            
            # Delete lines where new y range begins
            # ronny does not like python yet
            calc_drift_copy = calc_drift[abs(calc_drift['new_y_range']) == 0].copy()
            
            # still not...
            del calc_drift
            calc_drift = calc_drift_copy.copy()
            del calc_drift_copy
            
            # Do some plotting of the drift stuff
            
            
            if PlotDriftAvgSpeed == True:
                nd.visualize.DriftAvgSpeed()
               
            if PlotDriftTimeDevelopment == True:
                nd.visualize.DriftTimeDevelopment()  
        
            if PlotDriftFalseColorMapFlow == True:
                nd.visualize.DriftFalseColorMapFlow(calc_drift, number_blocks, y_range)
            
            if PlotDriftVectors == True:
                nd.visualize.DriftVectors()
        
            if PlotDriftFalseColorMapSpeed == True:
                nd.visualize.DriftFalseColorMapSpeed()
        
            if PlotDriftCorrectedTraj == True:
                nd.visualize.DriftCorrectedTraj()
        
        print('drift correction --> finished')
        
        nd.handle_data.WriteJson(ParameterJsonFile, settings) 

    
    return t_no_drift
Example #11
def cellcfg2distances(cellcfg,
                    # for 150x150 images
                    params={'locate':{'diameter':11, # round to odd number
                                      'noise_size':1,
                                      'separation':15,
                                      'threshold':4000,
                                      'preprocess':True,
                                      'invert':False,
                                      'max_iterations':50,
                                      'percentile':0,
                                      'engine':'numba',
                                      },
                    'link_df':{
                               'search_range':5,
                               'memory':1,
                               'link_strategy':'drop',},
                    'filter_stubs':{'threshold':4},
                    'get_distance_from_centroid':{'center':[75,75]},
#                     'msd':{'mpp':0.0645,'fps':0.2, 'max_lagtime':100},
                           },
                    test=False,force=False):
    params['locate']['separation']=params['locate']['diameter']*1
    params['locate']['threshold']=cellcfg['signal_cytoplasm']*0.5
    params['link_df']['search_range']=params['locate']['diameter']*0.33
            
    to_dict(params,f"{cellcfg['outp']}/params.yml")
    
    if not test_locate_particles(cellcfg,params['locate'],force=force,test=False):
        print(cellcfg['cfgp'])
        return 
    # get trajectories
    steps=['locate','link_df','filter_stubs','filter_returns','subtract_drift','distance']
    dn2dp={s:f"{cellcfg['outp']}/d{si}{s}.tsv" for si,s in enumerate(steps)}
    dn2plotp_suffix={s:f"{si}{s}.png" for si,s in enumerate(steps)}
    steps_done=[k for k in dn2dp if exists(dn2dp[k])]
    
    if ('distance' in steps_done) and not force:
       print(cellcfg['cfgp'])
       return
           
    from htsimaging.lib.plot import image_trajectories
    from htsimaging.lib.stat import get_distance_from_centroid
           
    img_gfp=np.load(cellcfg['cellgfpmaxp'])
    img_bright=np.load(cellcfg['cellbrightp'])
    
    dn2df={}
    dn2df['locate']=tp.batch([np.load(p) for p in sorted(cellcfg['cellframes_masked_substracted'])],
                             **params['locate'])
    if len(dn2df['locate'])==0:
        return
    dn2df['locate']['frame']=dn2df['locate']['frame'].astype(int)
    dn2df['link_df']=tp.link_df(dn2df['locate'], **params['link_df'])
#     if params['link_df']['memory']!=0:
    dn2df['link_df']=fill_frame_jumps(dn2df['link_df'],
                      jump_length=2 if params['link_df']['memory']==0 else params['link_df']['memory']+1)
#     to_table(dn2df['link_df'],'test.tsv')
#     to_table(dn2df['link_df'],dn2dp['link_df'])
    image_trajectories(dtraj=dn2df['link_df'], 
                       img_gfp=img_gfp, 
                       img_bright=img_bright, fig=None, ax=None)
    savefig(f"{cellcfg['plotp']}/image_trajectories_{dn2plotp_suffix['link_df']}")
#     to_table(dn2df['link_df'],dn2dp['link_df'])

    
    dn2df['filter_stubs']=tp.filter_stubs(dn2df['link_df'], threshold=params['filter_stubs']['threshold'])
    dn2df['filter_stubs'].index.name='index'
    dn2df['filter_stubs'].index=range(len(dn2df['filter_stubs']))
    if len(dn2df['filter_stubs'])==0:
        to_table(dn2df['filter_stubs'],dn2dp['distance'])
        print(cellcfg['cfgp'])
        return 
    image_trajectories(dtraj=dn2df['filter_stubs'], 
                       img_gfp=img_gfp, 
                       img_bright=img_bright, fig=None, ax=None)
    savefig(f"{cellcfg['plotp']}/image_trajectories_{dn2plotp_suffix['filter_stubs']}")
    
    dn2df['filter_returns']=get_distance_from_centroid(dn2df['filter_stubs'],**params['get_distance_from_centroid'])
    dn2df['filter_returns']=trim_returns(dn2df['filter_returns'])
    savefig(f"{cellcfg['plotp']}/image_trajectories_stats_trimming_{dn2plotp_suffix['filter_returns']}")

    dn2df['filter_returns']=tp.filter_stubs(dn2df['filter_returns'], threshold=params['filter_stubs']['threshold'])
    dn2df['filter_returns'].index.name='index'
    dn2df['filter_returns'].index=range(len(dn2df['filter_returns']))
    if len(dn2df['filter_returns'])==0:
        to_table(dn2df['filter_returns'],dn2dp['distance'])
        print(cellcfg['cfgp'])
        return             
    image_trajectories(dtraj=dn2df['filter_stubs'], 
                       img_gfp=img_gfp, 
                       img_bright=img_bright, fig=None, ax=None)
    savefig(f"{cellcfg['plotp']}/image_trajectories_{dn2plotp_suffix['filter_returns']}")

    d = tp.compute_drift(dn2df['filter_returns'])
    dn2df['subtract_drift'] = tp.subtract_drift(dn2df['filter_stubs'], d)
    image_trajectories(dtraj=dn2df['subtract_drift'], 
                       img_gfp=img_gfp, 
                       img_bright=img_bright, fig=None, ax=None)
    savefig(f"{cellcfg['plotp']}/image_trajectories_{dn2plotp_suffix['subtract_drift']}")

    dn2df['distance']=get_distance_from_centroid(dn2df['subtract_drift'],**params['get_distance_from_centroid'])
    from htsimaging.lib.stat import get_distance_travelled
    dn2df['distance']=get_distance_travelled(t_cor=dn2df['distance'])
    
    for k in dn2df:
        to_table(dn2df[k],dn2dp[k])
Example #12
    squared_displacements = []
    a = coeff[0]
    for i in range(len(x)):
        disp = (1 / (1 + a**2)) * ((y[i] - y_approx[i])**2) / len(x)
        squared_displacements.append(disp)
    plt.title('MSD')
    plt.hist(squared_displacements, 100)
    plt.show()


with tp.PandasHDFStore('data.h5') as store:
    trajectories = pd.concat(iter(store))
    #filtered = tp.filter_stubs(trajectories)
    filtered = trajectories
    drift = tp.compute_drift(filtered)

    im = tp.imsd(filtered, 1, 1)
    plt.plot(im.index, im, 'k-', alpha=0.1)
    plt.xscale('log')
    plt.yscale('log')
    plt.title("Mean squared displacement for each particle")
    plt.show()

    disp_x = []
    disp_y = []
    for i in range(1, len(drift.x.values)):
        disp_x.append(drift.x.values[i] - drift.x.values[i - 1])
        disp_y.append(drift.y.values[i] - drift.y.values[i - 1])

    plt.figure(dpi=300)
Example #13
import os

import matplotlib.pyplot as plt
import pandas as pd
import trackpy as tp


def plotTraj(i):
    particle = df.loc[df['particle'] == i]
    plt.plot(particle['frame'], particle['x'])
    turning_index = particle['y'].idxmax()
    bef = particle[:turning_index]
    aft = particle[turning_index:]
    print(turning_index)
    return bef, aft


df = pd.read_csv('../csvs/extract.csv', index_col=0)  # DataFrame.from_csv was removed from pandas
x = tp.compute_drift(df)
x.plot()

#particles = set(df['particle'])
#
#for i in particles:
#    plotTraj(i)


def returnParticles(middle):
    filtered = []
    turning_indices = []
    for i in pivoted_y:
        turning_index = pivoted_y[i].idxmax()
        if abs(turning_index - middle) < 10:
            filtered.append(i)
Example #14
def TransversalEstimation(settings, t_drift, drift_smoothing_frames, rolling_window_size, min_particle_per_block):
    """
    Y-Depending drift-correction
    RF: Creation of y-sub-zones and calculation of drift
    SW 180717: Subtraction of drift from trajectories
    """
    nd.logger.info("Mode: transversal drift correction (laminar flow)")

    
    nd.logger.warning('This code should work, but we are not entirely sure it is ideal. Have a look at it if you rely on it!')
            
    
    # how many particles are needed to perform a drift correction
    #min_particle_per_block = 40
    
#    # use blocks above and below for averaging (more particles make drift correction better)
#    # e.g. 2 means y subarea itself and the two above AND below
#    rolling_window_size = 5

    #total number of averaged blocks
    average_over_total_block = 2 * rolling_window_size + 1
      
    # sort y values to have in each sub area the same amount of particles
    all_y_sorted = t_drift.y.values.copy()
    all_y_sorted.sort()
    
    y_min = all_y_sorted[0];   # min y of tracked particle
    y_max = all_y_sorted[-1];   # max y of tracked particle
    num_data_points = len(all_y_sorted)
    
    # total number of captured frames
    num_frames = t_drift.index.max() - t_drift.index.min() + 1
    
    """
    This is difficult to explain =/
    We have num_data_points data points and want to split them such that each y sub-area
    contains min_particle_per_block particles in each of the num_frames frames.
    Because of the averaging with neighbouring areas, the effective number is raised by
    average_over_total_block.
    Start: distribute num_data_points over the number of frames and blocks.
    """
    
    # calculate how many transversal blocks we can form
    number_blocks = int(num_data_points / min_particle_per_block / num_frames * average_over_total_block)
    
    # get the y-coordinates of all particles in a block
    sub_y = np.zeros(number_blocks+1)
    
    for x in range(0,number_blocks):
        use_index = int(num_data_points * (x / number_blocks))
        sub_y[x] = all_y_sorted[use_index]
    sub_y[-1] = y_max
    
    #average y-range in each block
    y_range = (sub_y[:-1] + sub_y[1:]) / 2
    
    # delete variable to start again
    if 'calc_drift' in locals():
        del calc_drift 
        del total_drift
        total_drift = pd.DataFrame(columns = ['y','x','frame'])
        calc_drift_diff = pd.DataFrame()

    # Creating a copy of the trajectory which will contain a new column with each values ysub-position
    t_drift_ysub = t_drift.copy()
    t_drift_ysub['ysub']=np.nan # Defining values to nan
    
    # loop the drift correction over the transversal blocks
    for x in range(0,number_blocks):
        print(x)
           
        # calc which subareas of y are in the rolling window.
        sub_y_min = x - rolling_window_size
        if sub_y_min  < 0:
            sub_y_min = 0
        
        sub_y_max = x + 1 + rolling_window_size
        if sub_y_max  > number_blocks:
            sub_y_max = number_blocks;
            
        # select which particles are in the surrounding blocks for the averaging
        use_part = (t_drift['y'] >= sub_y[sub_y_min]) & (t_drift['y'] < sub_y[sub_y_max])
        
        # select which particles are in the current block and are corrected
        use_part_subtract = (t_drift['y'] >= sub_y[x]) & (t_drift['y'] < sub_y[x+1]) 
        
        # get their indices
        use_part_index = np.where(np.array(use_part)==True)   # find index of elements true
        
        # WHAT IS THIS VARIABLE ACTUALLY GOOD FOR?
        use_part_subtract_index = np.where(np.array(use_part_subtract)==True)
        
        # Writing x as ysub into copy of t1. That's needed to treat data differently depending on y-sub-position
        # Python 3.5 t1_ysub['ysub'].iloc[use_part_subtract_index]=x 
        # I believe that's not an elegant way of doing it. Maybe find a better approach       
        t_drift_ysub.loc[use_part,'ysub'] = x # RF 180906
        

        
        # how many particles are in each frames
        use_part_index = list(use_part_index)[0]
        use_part_subtract_index = list(use_part_subtract_index)[0]
        num_particles_block = len(use_part_index)
            
        # check if drift_smoothing_frames is not longer than the video is long
        num_frames = settings["ROI"]["frame_max"] - settings["ROI"]["frame_min"] + 1
        if num_frames < drift_smoothing_frames:
            sys.exit("Number of frames is smaller than drift_smoothing_frames")


        # make the drift correction with the subframe
        calc_drift_y = tp.compute_drift(t_drift.iloc[use_part_index], drift_smoothing_frames)
        
        # calculate the drift of this y block
        calc_drift_y_diff=calc_drift_y.diff(periods=1).fillna(value=0)
        calc_drift_y_diff['ysub']=x
            
        calc_drift_y['frame'] = calc_drift_y.index.values
        calc_drift_y['y_range'] = y_range[x]
    
        # calculate entire drift with starting and end position
        start_pos = calc_drift_y.set_index('y_range')[['y', 'x', 'frame']].iloc[[0],:]
        end_pos = calc_drift_y.set_index('y_range')[['y', 'x', 'frame']].iloc[[-1],:]
        my_total = end_pos - start_pos
        my_total ['num_particles'] = num_particles_block
        
        if x == 0:
            calc_drift = calc_drift_y
            total_drift = my_total
            calc_drift_diff = calc_drift_y_diff # that's going to be the look-up for the drift, depending on y-sub
            
        else:
            calc_drift = pd.concat([calc_drift, calc_drift_y])  #prepare additional index
            total_drift = pd.concat([total_drift, my_total])
            calc_drift_diff = pd.concat([calc_drift_diff,calc_drift_y_diff])
    
    # to distinguish that we're not looking at positions but the difference of positions
    calc_drift_diff=calc_drift_diff.rename(columns={'x':'x_diff1', 'y':'y_diff1'}) 
    
    # Adding frame as a column
    calc_drift_diff['frame']=calc_drift_diff.index 
    
    # Indexing by y-sub-area and frame
    calc_drift_diff=calc_drift_diff.set_index(['ysub','frame']) 
    
    # Adding frame as a column to particle data
    t_drift_ysub['frame']=t_drift_ysub.index 
    
    # Indexing particle-data analogously to drift-lookup
    t_drift_ysub=t_drift_ysub.set_index(['ysub','frame']) 
    
    # Adding drift-lookup into particle data, using frame and ysub
    t_drift_ysub_diff=pd.merge(t_drift_ysub,calc_drift_diff, left_index=True, right_index=True, how='inner') 
    
    # Releasing frame from index -> allows to sort easier by frame
    t_drift_ysub_diff=t_drift_ysub_diff.reset_index('frame') 
    
    cumsums_x=t_drift_ysub_diff.sort_values(['particle','frame']).groupby(by='particle')['x_diff1'].cumsum() 
    
    # Calculating particle history in x direction:
    # sorting by particle first, then frame, grouping then by particle and calculating the cumulative sum of displacements
    cumsums_y=t_drift_ysub_diff.sort_values(['particle','frame']).groupby(by='particle')['y_diff1'].cumsum()
    # same in y-direction
    
     # Sorting particle data in the same way
    t_no_drift_ysub_diff_sort=t_drift_ysub_diff.sort_values(['particle','frame'])
    
     # UNCERTAIN: + or - ?????
    t_no_drift_ysub_diff_sort['x_corr']=t_drift_ysub_diff.sort_values(['particle','frame'])['x']-cumsums_x
    
    # subtracting drift-history for each particle
     # UNCERTAIN: + or - ?????
    t_no_drift_ysub_diff_sort['y_corr']=t_drift_ysub_diff.sort_values(['particle','frame'])['y']-cumsums_y
    # same in y-direction
    
    #tm_sub=t1_ysub_diff_sort.copy()
    # just giving a more descriptive name to particle data
    t_no_drift_sub = t_no_drift_ysub_diff_sort 
    
    # dropping axes that wouldn't be needed any longer
    t_no_drift_sub = t_no_drift_sub.drop(['x', 'y', 'x_diff1', 'y_diff1'], axis=1) 
    
    #tm_sub=tm_sub.rename(columns={'x':'x', 'y':'y'}) 
    # renaming the corrected position into original names to keep the remaining code working with it
    t_no_drift_sub = t_no_drift_sub.rename(columns={'x_corr':'x', 'y_corr':'y'}) 
    
    # Bringing tm_sub back into a format that later parts of the code need to work with it
    t_no_drift_sub_store=t_no_drift_sub.copy()
    
    # Forgetting about ysub - which isn't needed anymore - and making frame the only index again
    t_no_drift_sub.set_index('frame', drop=True, inplace=True) 
    
    # Sorting by frame
    t_no_drift_sub=t_no_drift_sub.sort_index() 
    
    # Adding frame as a column
    t_no_drift_sub['frame'] = t_no_drift_sub.index 
    
    #reindex https://stackoverflow.com/questions/25122099/move-column-by-name-to-front-of-table-in-pandas
    cols = t_no_drift_sub.columns.tolist()
    cols.insert(0, cols.pop(cols.index("y")))
    cols.insert(0, cols.pop(cols.index("x")))
    
    t_no_drift_sub = t_no_drift_sub.reindex(columns= cols)# Ordering as needed later
    
    # Set this, if y-depending-drift-correction is to be used
    t_no_drift = t_no_drift_sub 
    
#        t_no_drift = tp.filter_stubs(t_no_drift, min_tracking_frames) 
    t_no_drift = t_no_drift.sort_values('frame')
    
    # insert y_range
    # total_drift.index = y_range RF180906 is that needed?
        
    # set two new indices - first frame, then y_range
    calc_drift = calc_drift.set_index(['frame'])
    
    # calc velocity as the derivative of the drift,
    # averaged over avg_frames frames for display
    avg_frames = 30
    calc_drift[['velocity_y', 'velocity_x','new_y_range']] = calc_drift[['y','x','y_range']].diff(avg_frames)/avg_frames
    
    
    # Delete lines where new y range begins
    # ronny does not like python yet
    calc_drift_copy = calc_drift[abs(calc_drift['new_y_range']) == 0].copy()
    
    # still not...
    del calc_drift
    calc_drift = calc_drift_copy.copy()
    
    return t_no_drift, total_drift, calc_drift, number_blocks, y_range
Example #15
tp.mass_size(tra1.groupby('particle').mean());  ##plots size vs mass




fig,(ax1,ax2,ax3)=plt.subplots(1,3)
fig.suptitle("Trajectories with/without Overall Drift")


plt.figure()
tp.plot_traj(tra1);




d = tp.compute_drift(tra1)    			##compute overall drift of the trajectories
d.plot()
plt.show()
tm = tp.subtract_drift(tra1.copy(), d)   		##subtract overall drift from the trajectories
ax = tp.plot_traj(tm)    			##plot the drift-corrected trajectories
plt.show()


##MSD Calculation and Plot
im=tp.imsd(tm,1/5.,60)    ##microns per pixel, frames per second=60
fig, ax = plt.subplots()
ax.plot(im.index, im, 'k-', alpha=0.1)  # black lines, semitransparent
ax.set(ylabel=r'$\langle \Delta r^2 \rangle$ [$\mu$m$^2$]',
       xlabel='lag time $t$',title='MSD')
ax.set_xscale('log')
ax.set_yscale('log')
Example #16
#     plt.figure()
#     tp.annotate(traj3[traj3['frame'] == 0], frames[0]);
#==============================================================================
     
    traj3.drop(traj3.columns[[2,3,4,5,6,7]], axis = 1, inplace = True) # keep only x,y,frame,particle
     
#==============================================================================
#     plt.figure()
#     tp.plot_traj(traj3);
#==============================================================================
     
     
    
    ## Drift 
    # Calculate the drift
    drift = tp.compute_drift(traj,smoothing = 20) # return the cumsum of <dx,dy>
    
#==============================================================================
#     plt.figure() # plot <dx,dy>
#     drift.plot();
#==============================================================================
    
     # Subtract the drift
    traj_f = tp.subtract_drift(traj3.copy(), drift) # final trajectories
    no = traj_f.groupby('frame').size() # no. of particles per frame
    print('Activity', act, 'particles', no.mean())

#==============================================================================
#     plt.figure()
#     tp.plot_traj(traj_f); 
#==============================================================================
Example #17
#fig=tp.annotate(f, aa[0])
#fig.figure.savefig("./trackpyResult/trackpyAnnotation.jpg")
#f = tp.batch(aa[:], 11, minmass=200, invert=True);
#f = tp.batch(aa[:], 11, invert=True);
fig, ax = plt.subplots()
t = tp.link_df(f, 5, memory=3)
t1 = tp.filter_stubs(t, 50)
print(t1)
t1.to_csv("./trackpyResult/t1.csv")
# Compare the number of particles in the unfiltered and filtered data.
print('Before:', t['particle'].nunique())
print('After:', t1['particle'].nunique())
#fig=plt.figure()
#fig=tp.mass_size(t1.groupby('particle').mean()); # convenience function -- just plots size vs. mass
#fig.figure.savefig("./trackpyResult/particle.jpg")
fig=plt.figure()
fig=tp.plot_traj(t1)
fig.figure.savefig("./trackpyResult/trajectoryI.jpg")
t2 = t1
fig=plt.figure()
fig=tp.annotate(t2[t2['frame'] == 0], aa[0]);
fig.figure.savefig("./trackpyResult/t2Annotation.jpg")
d = tp.compute_drift(t2)
fig=plt.figure()
fig=d.plot()
tm = tp.subtract_drift(t1.copy(), d)
fig.figure.savefig("./trackpyResult/comDrift.jpg")
fig=plt.figure()
fig=tp.plot_traj(tm)
fig.figure.savefig("./trackpyResult/traj.jpg")
Example #18
print("Loading CDF.pkl done")
'''
feats=tpy.batch(frames, diameter=d, minmass=None, maxsize=59, noise_size=1, smoothing_size=None,
threshold=None, invert=False, topn=None, preprocess= True, max_iterations=10, filter_after=True,
characterize=True, engine='python', meta='Results/Batch_Info.txt')
'''

tr = tpy.link_df(features,
                 search_range=10,
                 adaptive_stop=3,
                 adaptive_step=0.95,
                 memory=3)
#tr.to_pickle("DS5tracks.pkl")
#tr=pandas.read_pickle("DS5tracks.pkl")
tr1 = tpy.filter_stubs(tr, 0.1 * numFiles)
drift = tpy.compute_drift(tr1)
#tr1=tr                                                     #No Truncation
trF = tpy.subtract_drift(tr1, drift)
#plt.show()
plt.clf()
'''
tempTr=trF
R=60
pairs=[]
for t in range(numFiles-1):
    frame0=trF.loc[[t], ['particle', 'x', 'y']]
    frame1=trF.loc[[t+1], ['particle', 'x', 'y']]
    nums0=frame0['particle']
    nums1=frame1['particle']
    for idx, rows in frame1.iterrows():
        num=rows['particle']
Example #19
def main():

    for case_idx, case in enumerate(cases.values()):

        res_path = gen_path + case[1]
        frames = pims.ImageSequence(gen_path + case[0], as_grey=True)

        # Stores the unfiltered annotated image of a frame to local file path
        if plots["Annotate_unfiltered"]:
            k = tp.locate(frames[0],
                          11,
                          invert=(case_idx in [0, 1]),  # a bool, not a one-element list (a list is always truthy)
                          minmass=200)
            fig = plt.figure("Annotated_unfiltered_image_" + case[2])
            ax1 = fig.add_subplot()
            a = ["k", "w"][case_idx in [2, 3]]
            tp.annotate(k, frames[0], color=a, ax=ax1)
            #ax1.set_title("Annotated unfiltered image case: "+ case[2])
            #ax1.set_xlabel("x[px]", fontsize=size)
            #ax1.set_ylabel("y[px]", fontsize=size)
            ax1.tick_params(axis="both",
                            which="both",
                            top=False,
                            bottom=False,
                            labelbottom=False,
                            right=False,
                            left=False,
                            labelleft=False)
            fig.savefig(res_path + "Annotated_unfiltered_image_" + case[2] +
                        ".png",
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close(fig)

        # If True: Tracks all frames and stores .csv to locally, else: imports such .csv file from local
        if plots["Generate_batch"]:
            f = tp.batch(frames[:225],
                         11,
                         minmass=100,
                         invert=(case_idx in [0, 1]))
            f.to_csv(res_path + "batch_" + case[2] + ".csv")
        if not plots["Generate_batch"]:
            f = pd.read_csv(res_path + "batch_" + case[2] + ".csv")

        # Linking and filtering
        t = tp.link_df(f, 5, memory=3)
        t1 = tp.filter_stubs(t, 50)

        # Plots the size vs mass profile and saves to local file path
        if plots["Size_vs_mass"]:
            fig = plt.figure("Size_vs_mass_" + case[2])
            ax1 = fig.add_subplot()
            tp.mass_size(
                t1.groupby('particle').mean(),
                ax=ax1)  # convenience function -- just plots size vs. mass
            #ax1.set_title("Size vs mass case: " + case[2])
            ax1.set_xlabel("mass", fontsize=size)
            ax1.set_ylabel("Gyration radius [px]", fontsize=size)
            ax1.spines['top'].set_visible(False)
            ax1.spines['right'].set_visible(False)
            ax1.tick_params(axis="both", which="major", labelsize=size)
            ax1.tick_params(axis="both", which="minor", labelsize=size)
            fig.savefig(res_path + "Size_vs_mass_" + case[2] + ".png",
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close(fig)

        if plots["Annotate_filtered"]:

            if case_idx in [0, 1]:  # Set BF condition
                condition = lambda x: (
                    (x['mass'].mean() > 250) & (x['size'].mean() < 3.0) &
                    (x['ecc'].mean() < 0.1))

            elif case_idx in [2, 3]:  # Set DF condition
                condition = lambda x: (
                    (x['mass'].mean() > 100) & (x['size'].mean() < 5.0) &
                    (x['ecc'].mean() < 0.1))

            t2 = tp.filter(
                t1, condition
            )  # a wrapper for pandas' filter that works around a bug in v 0.12

            fig = plt.figure("Annotated_filtered_image_" + case[2])
            ax1 = fig.add_subplot()
            k = ["k", "w"][case_idx in [2, 3]]
            tp.annotate(t2[t2['frame'] == 0], frames[0], color=k, ax=ax1)
            #ax1.set_title("Annotated filtered image case: " + case[2])
            #ax1.set_xlabel("x[px]", fontsize=size)
            #ax1.set_ylabel("y[px]", fontsize=size)
            ax1.tick_params(axis="both",
                            which="both",
                            top=False,
                            bottom=False,
                            labelbottom=False,
                            right=False,
                            left=False,
                            labelleft=False)
            fig.savefig(res_path + "Annotated_filtered_image_" + case[2] +
                        ".png",
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close(fig)

        if plots["Gyration_radius_filtered"]:
            size_dis_t1 = [i * ratio_μm_px for i in t1['size']]
            plt.figure("Gyration_radius_filtered_" + case[2])
            plt.hist(size_dis_t1, bins=300, color="k", alpha=0.5)
            #plt.title("Gyration radius filtered case: "+ case[2])
            plt.ylabel("Events", fontsize=size)
            plt.xlabel("Gyration radius [μm]", fontsize=size)
            plt.gca().spines['top'].set_visible(False)
            plt.gca().spines['right'].set_visible(False)
            plt.tick_params(axis="both", which="major", labelsize=size)
            plt.tick_params(axis="both", which="minor", labelsize=size)
            plt.savefig(res_path + "Gyration_radius_filtered" + case[2] +
                        ".png",
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close("all")

        if plots["Gyration_radius_unfiltered"]:
            size_dis_t = [i * ratio_μm_px for i in t['size']]
            plt.figure("Gyration_radius_unfiltered_" + case[2])
            plt.hist(size_dis_t, bins=300, color="k", alpha=0.5)
            #plt.title("Gyration radius unfiltered case: " + case[2])
            plt.ylabel("Events", fontsize=size)
            plt.xlabel("Gyration radius [μm]", fontsize=size)
            plt.gca().spines['top'].set_visible(False)
            plt.gca().spines['right'].set_visible(False)
            plt.tick_params(axis="both", which="major", labelsize=size)
            plt.tick_params(axis="both", which="minor", labelsize=size)
            plt.savefig(res_path + "Gyration_radius_unfiltered" + case[2] +
                        ".png",
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close("all")

        d = tp.compute_drift(t1)
        tm = tp.subtract_drift(t1, d)

        if plots["Trajectory_drift"]:
            fig = plt.figure("Trajectory_drift_subtracted_" + case[2])
            ax1 = fig.add_subplot()
            ax1.tick_params(axis="both", which="major", labelsize=size)
            ax1.tick_params(axis="both", which="minor", labelsize=size)
            ax1.spines['top'].set_visible(False)
            ax1.spines['right'].set_visible(False)
            tp.plot_traj(tm, ax=ax1)
            #ax1.set_title("Trajectory with drift subtracted case: " + case[2])
            plt.savefig(res_path + "Trajectory_drift_subtracted_" + case[2] +
                        ".png",
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close(fig)

        if plots["Variance_all_parts"]:
            im = tp.imsd(
                tm, ratio_μm_px, fps, max_lagtime=225
            )  # microns per pixel = 100/285., frames per second = 24
            plt.figure("Variance_for_all_particles_" + case[2])
            #plt.title("Variance for all particles case: " + case[2])
            plt.plot(im.index, im, 'k-',
                     alpha=0.1)  # black lines, semitransparent
            plt.ylabel(r'$\langle \Delta r^2 \rangle$ [$\mu$m$^2$]',
                       fontsize=size),
            plt.xlabel('time $t$', fontsize=size)
            plt.gca().spines['top'].set_visible(False)
            plt.gca().spines['right'].set_visible(False)
            plt.xscale('log')
            plt.yscale('log')
            plt.savefig(res_path + "Variance_for_all_particles_" + case[2] +
                        ".png",
                        bbox_inches="tight",
                        pad_inches=0)

        if plots["Variance_linear"] or plots["Variance_power_fit"] or plots[
                "Return_A_and_D"]:
            em = tp.emsd(tm, ratio_μm_px, fps, max_lagtime=225)

        if plots["Variance_linear"]:
            plt.figure("Variance_linear_fit_" + case[2])
            #plt.title("Variance linear fit case: " + case[2])
            plt.plot(em.index, em, 'ko', alpha=0.5)
            plt.ylabel(r'$\langle \Delta r^2 \rangle$ [$\mu$m$^2$]',
                       fontsize=size),
            plt.xlabel('time $t$ [s]', fontsize=size)
            plt.gca().spines['top'].set_visible(False)
            plt.gca().spines['right'].set_visible(False)
            plt.tick_params(axis="both", which="major", labelsize=size)
            plt.tick_params(axis="both", which="minor", labelsize=size)
            plt.xscale('log')
            plt.yscale('log')
            plt.ylim(1e-2, 50)
            plt.savefig(res_path + "Variance_linear_fit_" + case[2] + ".png",
                        bbox_inches="tight",
                        pad_inches=0)

        if plots["Variance_power_fit"]:
            fig = plt.figure("Variance_power_fit_" + case[2])
            ax1 = fig.add_subplot()
            tp.utils.fit_powerlaw(em, ax=ax1, color="k", alpha=0.5)
            #ax1.set_title("Variance power fitted case: " + case[2])
            ax1.set_ylabel(r'$\langle \Delta r^2 \rangle$ [$\mu$m$^2$]',
                           fontsize=size)
            ax1.set_xlabel('time $t$ [s]', fontsize=size)
            plt.gca().spines['top'].set_visible(False)
            plt.gca().spines['right'].set_visible(False)
            ax1.tick_params(axis="both", which="major", labelsize=size)
            ax1.tick_params(axis="both", which="minor", labelsize=size)
            fig.savefig(res_path + "Variance_power_fit_" + case[2] + ".png",
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close(fig)

        if plots["Hydrodynamic_radius_filtered"]:
            im = tp.imsd(tm, ratio_μm_px, fps, max_lagtime=225)
            r_h = []
            count = 0
            im = im.rename_axis("ID").values

            for index in range(1, len(im[0])):
                if isinstance(im[40][index], float) and isinstance(
                        im[8][index], float):
                    D = (im[40][index] - im[8][index]) / (4 * (40 - 8) /
                                                          fps) * 10**(-12)
                    if isinstance(D, float):
                        r_h += [abs(10**6 * (k_b * T) / (6 * np.pi * μ * D))]
                        if 0 < abs(10**6 * (k_b * T) /
                                   (6 * np.pi * μ * D)) < 6:
                            count += 1

            print("In interval: ", count, "Total: ", len(r_h), "Ratio: ",
                  count / len(r_h))
            plt.figure("Hydrodynamic_radius_filtered_" + case[2])
            plt.hist(r_h,
                     bins=int(count / 3),
                     color="k",
                     alpha=0.5,
                     range=(0, 6))
            #plt.title("Hydrodynamic radius filtered case: "+ case[2])
            plt.ylabel("Trajectories", fontsize=size)
            plt.xlabel("Hydrodynamic radius [μm]", fontsize=size)
            plt.gca().spines['top'].set_visible(False)
            plt.gca().spines['right'].set_visible(False)
            plt.tick_params(axis="both", which="major", labelsize=size)
            plt.tick_params(axis="both", which="minor", labelsize=size)
            plt.savefig(res_path + "Hydrodynamic_radius_filtered" + case[2] +
                        ".png",
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close("all")

        if plots["Return_A_and_D"]:
            A = tp.utils.fit_powerlaw(em, ax=ax1, color="k",
                                      alpha=0.5)["A"] * 10**(-12)
            print(tp.utils.fit_powerlaw(em, ax=ax1, color="k", alpha=0.5))
            print("For case ", case[2], " A=", A, ", and D=", A / 4, ".")
Example #20
    def analyze(self, plot_gif=False):
        self.drifts = []
        self.v_drift_mag = []
        self.D_constants = []
        self.D_constants2 = []
        self.msd_slope = []
        self.msd_intercept = []
        self.mu_hats = []
        self.ed = []
        self.em = []
        self.frames = []
        self.dataframes = []

        for i, path in enumerate(self.SXM_PATH):
            frames = SXMReader(path)
            self.frames.append(frames)
            self.NM_PER_PIXEL = frames.meters_per_pixel * 1e9
            molecule_size, min_mass, max_mass, separation, min_size, max_ecc, adaptive_stop, search_range, _ = self.PARAMS[
                i]
            f = tp.batch(frames,
                         molecule_size,
                         minmass=min_mass,
                         separation=separation)
            t = tp.link(f,
                        search_range=search_range,
                        adaptive_stop=adaptive_stop)
            t1 = t[((t['mass'] > min_mass) & (t['size'] > min_size)
                    & (t['ecc'] < max_ecc)) & (t['mass'] < max_mass)]
            t2 = tp.filter_stubs(t, 3)
            # Compare the number of particles in the unfiltered and filtered data.
            print('Before:', t['particle'].nunique())
            print('After:', t2['particle'].nunique())

            if plot_gif == True:
                moviename = "{}-{}".format(min(self.fileranges[i]),
                                           max(self.fileranges[i]))
                singlemoviefolder = self.MOVIE_FOLDER + moviename + "/"
                if not os.path.exists(singlemoviefolder):
                    os.makedirs(singlemoviefolder)
                mpl.rcParams.update({'font.size': 14, 'font.weight': 'bold'})
                mpl.rc('image', origin='lower')
                mpl.rc('text', usetex=False)
                mpl.rc('text', color='orange')

                fns = []
                for j, frame in enumerate(frames):
                    fig = plt.figure(figsize=(5, 5))
                    tp.plot_traj(t2[(t2['frame'] <= j)],
                                 superimpose=frames[j],
                                 label=True)
                    fn = singlemoviefolder + "Image_{}.png".format(
                        self.fileranges[i][j])
                    fig.savefig(fn)
                    fns.append(fn)
                    ax = plt.gca()  # get the axis
                    ax.set_ylim(ax.get_ylim()[::-1])  # invert the axis
                    ax.xaxis.tick_top()  # and move the X-Axis
                    ax.yaxis.set_ticks(np.arange(0, 16, 1))  # set y-ticks
                    ax.yaxis.tick_left()  # remove right y-Ticks
                    plt.clf()
                mpl.rc('text', color='black')
                images = []
                for fn in fns:
                    images.append(imageio.imread(fn))
                imageio.mimsave(singlemoviefolder + moviename + '.gif',
                                images,
                                duration=0.5)
                self._cleanup_png(singlemoviefolder)

            # Compute drifts
            d = tp.compute_drift(t2)
            d.loc[0] = [0, 0]
            t3 = t2.copy()
            # Storing drifts
            self.drifts.append(d)

            # Method 1 of calculating D: variance of all displacements of Delta_t=1
            displacements = self._calculate_displacements(t3)
            self.D_constants.append(
                (displacements.dx.var() + displacements.dy.var()) /
                4)  # r^2 = x^2 + y^2 = 2Dt + 2Dt
            self.mu_hats.append(np.mean(displacements[['dx', 'dy']], axis=0))

            # Method 2 of calculating D: linear fit to MSD
            em = tp.emsd(t3,
                         frames.meters_per_pixel * 1e9,
                         self.DIFFUSION_TIME,
                         max_lagtime=len(frames),
                         detail=True)
            self.em.append(em)
            self.ed.append([em['<x>'], em['<y>']])
            result = linregress(em.index[:-8] * self.DIFFUSION_TIME,
                                em['msd'][:-8])
            self.msd_slope.append(result.slope)
            self.msd_intercept.append(result.intercept)
            self.D_constants2.append(result.slope / 4)

            # Store dataframe for future analysis
            self.dataframes.append(t3)
        self.v_drift_mag = np.linalg.norm(self.mu_hats, 2, axis=1)
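Method 1 above (D from the variance of single-step displacements, <r^2> = <dx^2> + <dy^2> = 4*D*dt) can be checked on synthetic data; a sketch with an assumed diffusion constant and dt = 1:

import numpy as np

rng = np.random.default_rng(0)
D_true, n_steps = 0.5, 100000             # assumed diffusion constant and step count

# 2D Brownian steps: each axis has variance 2*D*dt with dt = 1.
dx = rng.normal(0.0, np.sqrt(2 * D_true), n_steps)
dy = rng.normal(0.0, np.sqrt(2 * D_true), n_steps)

D_est = (dx.var() + dy.var()) / 4         # same estimator as in analyze() above
print(D_est)                              # close to D_true = 0.5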