Example 1
    def test_storage(self):
        STORE_NAME = 'temp_for_testing_{0}.h5'.format(_random_hash())
        if os.path.isfile(STORE_NAME):
            os.remove(STORE_NAME)
        try:
            s = self.storage_class(STORE_NAME)
        except IOError:
            raise nose.SkipTest('Cannot make an HDF5 file. Skipping')
        else:
            tp.batch(self.v, output=s, engine='python', meta=False,
                     **self.PARAMS)
            self.assertEqual(len(s), 2)
            self.assertEqual(s.max_frame, 1)
            count_total_dumped = s.dump()['frame'].nunique()
            count_one_dumped = s.dump(1)['frame'].nunique()
            self.assertEqual(count_total_dumped, 2)
            self.assertEqual(count_one_dumped, 1)
            assert_frame_equal(s.dump().reset_index(drop=True), 
                               self.expected.reset_index(drop=True))
            assert_frame_equal(s[0], s.get(0))

            # Putting an empty df should warn
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('ignore')
                warnings.simplefilter('always', UserWarning)
                s.put(pandas.DataFrame())
                assert len(w) == 1
            s.close()
            os.remove(STORE_NAME)
Example 2
 def test_storage(self):
     STORE_NAME = 'temp_for_testing_{0}.h5'.format(_random_hash())
     if os.path.isfile(STORE_NAME):
         os.remove(STORE_NAME)
     try:
         s = self.storage_class(STORE_NAME)
     except IOError:
         raise nose.SkipTest('Cannot make an HDF5 file. Skipping')
     else:
         tp.batch(self.v[[0, 1]],
                  *self.PARAMS,
                  output=s,
                  engine='python',
                  meta=False)
         self.assertEqual(len(s), 2)
         self.assertEqual(s.max_frame, 1)
         count_total_dumped = s.dump()['frame'].nunique()
         count_one_dumped = s.dump(1)['frame'].nunique()
         self.assertEqual(count_total_dumped, 2)
         self.assertEqual(count_one_dumped, 1)
         assert_frame_equal(s.dump().reset_index(drop=True),
                            self.expected.reset_index(drop=True))
         assert_frame_equal(s[0], s.get(0))
         s.close()
         os.remove(STORE_NAME)
Example 4
    def run_particle_detection(self, overwrite=False):
        print('Run particle detection')

        invert = self.gb_detection.check_invert.isChecked()
        diameter = self.gb_detection.spn_diameter.value()
        minmass = self.gb_detection.spn_minmass.value()

        gv.h5f.attrs[gv.KEY_ATTR_DETECT_INV] = invert
        gv.h5f.attrs[gv.KEY_ATTR_DETECT_DIA] = diameter
        gv.h5f.attrs[gv.KEY_ATTR_DETECT_MINMASS] = minmass

        print(f'Invert {invert}')
        print(f'Diameter {diameter}')
        print(f'Minmass {minmass}')

        trackpy_filepath = file_handling.get_trackpy_path()
        mode = 'a'
        # If detection file exists, ask if should be removed
        if os.path.exists(trackpy_filepath):

            if not overwrite:
                confirm_dialog = QtWidgets.QMessageBox.question(
                    gv.w, 'Overwrite?',
                    'Previous particle detection exists. Overwrite?',
                    QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
                    | QtWidgets.QMessageBox.Cancel, QtWidgets.QMessageBox.No)

                if confirm_dialog \
                        in [QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Cancel]:
                    return

            print('Remove previous detection file')
            if gv.tpf is not None:
                gv.tpf.close()
            gv.tpf = None
            mode = 'w'

        # Disable window
        gv.statusbar.showMessage('Run particle detection')
        gv.w.setEnabled(False)
        gv.app.processEvents()

        # Run detection
        with tp.PandasHDFStoreBig(trackpy_filepath, mode=mode) as s:
            tp.batch(gv.h5f[gv.KEY_PROCESSED],
                     diameter,
                     invert=invert,
                     minmass=minmass,
                     output=s,
                     processes='auto')

        # Enable window
        gv.w.setEnabled(True)
        gv.statusbar.set_ready()

        # Open detection file
        gv.tpf = tp.PandasHDFStoreBig(trackpy_filepath)
Example 5
 def test_PandasHDFStore(self):
     STORE_NAME = 'temp_for_testing.h5'
     if os.path.isfile(STORE_NAME):
         os.remove(STORE_NAME)
     try:
         s = tp.PandasHDFStore(STORE_NAME)
     except IOError:
         raise nose.SkipTest('Cannot make an HDF5 file. Skipping')
     else:
         tp.batch(self.v[[0, 1]], *self.PARAMS,
                  output=s, engine='python', meta=False)
         assert_frame_equal(s.dump().reset_index(drop=True), 
                            self.expected.reset_index(drop=True))
         os.remove(STORE_NAME)
Example 6
    def locate(self, diameter=7, plot_progress=False, **configs):
        configs['diameter'] = diameter

        # self.locate_configs has the highest priority
        configs.update(self.locate_configs)

        # Calculate locations
        tic = time.time()

        # Locate
        tp.quiet()

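        # trackpy calls this hook once per frame after locating; it receives
        # the frame number and the features found, and must return the
        # (possibly modified) features DataFrame.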
        def after_locate(frame_no, features):
            # Plot progress if required
            if plot_progress:
                self.show_text(
                    self.axes,
                    'Calculating {}/{} ...'.format(frame_no, self.n_frames))
                self.canvas.draw()
            console.print_progress(frame_no, self.n_frames)
            return features

        self.locations = tp.batch(self.objects,
                                  processes=0,
                                  after_locate=after_locate,
                                  **configs)

        console.show_status('Locating completed. Configurations:')
        console.supplement(configs)

        # Clear status
        if plot_progress: self.axes.cla()

        self.effective_locate_config = configs
Example 7
def example_with_trackpy_and_twv(filename):
    """
    Example usecase from input file to particle and trap positions in .dat file.
    """
    frames = pims.open(filename)
    # Open file with pims. Works with many file extensions.
    # This example assumes .twv file.

    # metadata = frames.get_all_metadata()
    # Optional access to additional metadata.

    times, laser_powers, traps = frames.get_all_tweezer_positions()
    # Obtain frame times, laser power at each frame time and
    # traps powers and positions at each frame.

    features = tp.batch(frames, 25, minmass=1000, invert=False)
    # Obtain features (particle positions) using trackpy's batch function.
    # It prints progress as it runs.
    # The 25 is the feature diameter; it must be an odd number.
    # It is recommended to tune these parameters using the GUI.

    tracks = tp.link_df(features, 15, memory=10)
    # Joins particles positions to tracks (connects them in time).
    # See trackpy documentation for parameters.

    save_tracked_data_pandas(filename[:-4] + '_out.dat', frames, tracks, times,
                             laser_powers, traps)
Example 8
    def generate_trajectories(self, subject='particles', plot=True,
                              export=True, invert=True, memory=50):
        """
        Locate features in the image sequence and link them into trajectories.

        :param subject: which objects to track, 'particles' or 'algae'
        :param plot: if True, plot the labelled trajectories
        :param export: if True, export the trajectories to a CSV file
        :param invert: passed to tp.batch; set True for dark features
        :param memory: passed to tp.link_df; frames a particle may go missing
        :return: the resulting TrajectorySequence
        """
        fv = tp.batch(self.image_sequence, self.particlesize, minmass=self.particleminmass, invert=invert)
        t = tp.link_df(fv, 5, memory=memory)
        if subject == 'particles':
            self.particle_trajectory_list = TrajectorySequence(t)
        elif subject == 'algae':
            self.algae_trajectory_list = TrajectorySequence(t)
        else:
            raise ValueError('The argument subject of the current method '
                             '(generate_trajectories) must be either particles or algae')
        if plot:
            tp.plot_traj(t, label=True)
            plt.show()
        if export:
            t.to_csv(self.path + '\\t_' + subject + '.csv', index=False, header=True)
        if subject == 'particles':
            return self.particle_trajectory_list
        if subject == 'algae':
            return self.algae_trajectory_list
Example 9
def extract_tracks_from_frames(frames):
    """Extract tracks as dataframe from movie frames.

    Parameters
    ----------
    frames: np.array
        the individual frames loaded from the .tif file,
        as a numpy array

    Returns
    -------
    tracks: pd.DataFrame
        the nanoparticle displacements in the movie, as a pandas
        DataFrame. Trackpy does most of the work here.

    """
    Nbframe = frames.shape[0]
    print('[extract.py] Running track extraction')
    print('Second Step: Localization of the nanoparticles in each frame')
    f = tp.batch(frames[:Nbframe], 7, minmass=150, preprocess=False)

    print('Third Step: Computation of the trajectories')
    tracks = tp.link_df(f, search_range=5, memory=7)
    # Problem with the choice of the memory parameter: some trajectories are
    # misinterpreted. Example, movie 411: with memory = 7 we get a trajectory
    # that starts on a messy stop; with memory = 2 the trajectory passes over
    # that stop, which is what actually happens if you watch the movie. The
    # problem (I believe) is that link_df does not predict the next position
    # from the velocity. That should be possible, but how?
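    # A possible direction (a sketch, not verified on this data): trackpy's
    # predict module can extrapolate each particle's next position from its
    # recent velocity before linking, e.g.:
    #   pred = tp.predict.NearestVelocityPredict()
    #   tracks = pred.link_df(f, search_range=5, memory=7)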

    return tracks
Example 10
def track_features(images, diameter=5, **kwargs):
    """Locate features - call them blobs, detections - in the given images.

    This is a wrapper for the `trackpy.batch` function.
    See its documentation for parameters and more information.

    Parameters:
    ----------
    diameter: odd int
        the approximate diameter, in pixels, of the features to detect.
    any keyword argument to be passed to the trackpy ``batch`` function

    Returns:
    --------
    features: pd.DataFrame
        the list of detected features

    """
    df = tp.batch(images, diameter=diameter, **kwargs)
    kwargs['diameter'] = diameter
    features = cats.particles.Particles(df)

    # Set up element attributes to be added once the particles are tracked
    features._element_attributes = {
        'source': images,
        'tracking_parameters': kwargs
    }

    return features
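A hypothetical usage sketch for the wrapper above (the file name and parameter values are made up for illustration):

    import pims
    images = pims.open('movie.tif')
    particles = track_features(images, diameter=7, minmass=100)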
Example 11
    def batch(self, array, filter=True, store=True):

        # Check odd numbers
        self.diameter = _nbr2odd(self.diameter)

        # Run TrackPy
        dataframe = tp.batch(
            array,
            self.diameter,
            minmass=self.minmass,
            maxsize=self.maxsize,
            separation=self.separation,
            noise_size=self.noise_size,
            smoothing_size=self.smoothing_size,
            threshold=self.threshold,
            invert=self.invert,
            percentile=self.percentile,
            topn=self.topn,
            preprocess=self.preprocess,
            max_iterations=self.max_iterations,
            characterize=self.characterize,
            engine=self.engine,
        )

        # Store in the instance
        if store:
            self.spots = deepcopy(dataframe)
            self.tracks = deepcopy(dataframe)

        # Filter the trajectory
        if filter:
            dataframe = self.filter(dataframe, store=store)

        return dataframe
Example 12
def Detect(estimateFeatureSize, CameraName, minMass = None, dynamicMinMass = False):
    ImagePath = 'E:/BubbleRisingUltimate/4mmGasBubbleRising/4mmGasBubbleRising'
    Path = os.path.join(ImagePath, CameraName)
    frames = pims.open(Path)
    print('Valid frames length is %d' %len(frames))

    # find five brightest
    if not minMass:
        f = tp.locate(frames[testFrame], estimateFeatureSize)
        mass = list(f['mass'])
        mass.sort()
        minMass = int(mass[-3]*0.9 + mass[-1]*0.1)
        print(minMass)
    #TopTen = np.argsort(f['mass'])[-5:]
    #TopTenArray = f['mass'][TopTen]
    # show mass histogram
    # show subpixel accuracy of the detection 
    #minMass = list(TopTenArray)[0]
    f = tp.locate(frames[testFrame], estimateFeatureSize, minmass=minMass)
    plt.figure()
    tp.annotate(f, frames[testFrame])
    # run batch processing for all frames
    if dynamicMinMass:
        f = f[0:0]
        for i in range(len(frames)):
            f_ele = tp.locate(frames[i], estimateFeatureSize)
            mass = list(f_ele['mass'])
            mass.sort()
            minMass = int(mass[-2]*0.9 + mass[-1]*0.1)
            f_ele = tp.locate(frames[i], estimateFeatureSize, minmass = minMass)
            # DataFrame.append was removed in pandas 2.0; use concat instead
            f = pd.concat([f, f_ele])
    else:
        f = tp.batch(frames, estimateFeatureSize, minmass = minMass)
    return f, frames
Example 13
def _locate_batch_wrapper(diameter: int, minmass: int, maxmass: int,
                          maxsize: float, img_seq: np.ndarray) -> pd.DataFrame:
    df = trackpy.batch(img_seq,
                       diameter=diameter,
                       minmass=minmass,
                       maxsize=maxsize)
    df = df[df['mass'] < maxmass]
    return df
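A minimal usage sketch for this wrapper (the array shape and threshold values are assumptions for illustration):

    import numpy as np
    # a small stack of noisy frames, just to exercise the wrapper
    img_seq = (np.random.rand(5, 64, 64) * 255).astype(np.uint8)
    feats = _locate_batch_wrapper(diameter=9, minmass=100, maxmass=5000,
                                  maxsize=3.0, img_seq=img_seq)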
Example 14
 def prepare(self):
     directory = os.path.join(path, 'video', 'image_sequence')
     self.v = tp.ImageSequence(directory)
     self.PARAMS = (11, 3000)
     self.expected = tp.batch(self.v[[0, 1]],
                              *self.PARAMS,
                              engine='python',
                              meta=False)
Example 15
 def setUpClass(cls):
     super(TestReproducibility, cls).setUpClass()
     # generate a new file
     video = pims.ImageSequence(
         os.path.join(path, 'video', 'image_sequence'))
     actual = tp.batch(invert_image(video), diameter=9, minmass=240)
     actual = tp.link_df(actual, search_range=5, memory=2)
     actual.to_csv(reproduce_fn)
Example 16
 def prepare(self):
     directory = os.path.join(path, 'video', 'image_sequence')
     self.v = ImageSequence(os.path.join(directory, '*.png'))
     # mass depends on pixel dtype, which differs per reader
     minmass = self.v[0].max() * 2
     self.PARAMS = {'diameter': 11, 'minmass': minmass, 'invert': True}
     self.expected = tp.batch(self.v[[0, 1]], engine='python', meta=False,
                              **self.PARAMS)
Example 17
 def prepare(self):
     directory = os.path.join(path, 'video', 'image_sequence')
     self.v = tp.invert_image(ImageSequence(os.path.join(directory, '*.png')))
     # mass depends on pixel dtype, which differs per reader
     minmass = self.v[0].max() * 2
     self.PARAMS = {'diameter': 11, 'minmass': minmass}
     self.expected = tp.batch(self.v[[0, 1]], engine='python', meta=False,
                              **self.PARAMS)
Example 18
 def detect(self,
            images):  ## and has same output format as YOLO localizer
     circles = []
     for n in range(np.shape(images)[0]):
         norm = images[n]
         circ = ct.circletransform(norm, theory='orientTrans')
         circles.append(circ / np.amax(circ))
     return tp.batch(circles, 51, minmass=50)
Example 19
 def prepare(self):
     directory = os.path.join(path, 'video', 'image_sequence')
     v = TrackpyImageSequence(os.path.join(directory, '*.png'))
     self.v = [tp.invert_image(v[i]) for i in range(2)]
     # mass depends on pixel dtype, which differs per reader
     minmass = self.v[0].max() * 2
     self.PARAMS = {'diameter': 11, 'minmass': minmass}
     self.expected = tp.batch(self.v, engine='python', meta=False,
                              **self.PARAMS)
Example 20
 def test_storage(self):
     STORE_NAME = 'temp_for_testing_{0}.h5'.format(_random_hash())
     if os.path.isfile(STORE_NAME):
         os.remove(STORE_NAME)
     try:
         s = self.storage_class(STORE_NAME)
     except IOError:
         raise nose.SkipTest('Cannot make an HDF5 file. Skipping')
     else:
         tp.batch(self.v[[0, 1]], *self.PARAMS,
                  output=s, engine='python', meta=False)
         assert len(s) == 2
         assert s.max_frame == 1
         assert_frame_equal(s.dump().reset_index(drop=True), 
                            self.expected.reset_index(drop=True))
         assert_frame_equal(s[0], s.get(0))
         s.close()
         os.remove(STORE_NAME)
Example 21
def nps_finder_tp_cluster(image_file, z_extent, filter_kernel, min_sep):
    files, metadata = preprocessor.data_org(tifdir)

    columns = ('x', 'y', 'z', 'a_total', 'intensity', 'particle', 'com_dist',
               'trial')
    all_parts = pd.DataFrame([], columns=columns)

    for i in range(0, len(files)):
        print(i)
        print(files[i])
        frames = pims.TiffStack('./tifs/' + folders[m] + '/tifs/' + files[i])
        # every other frame holds the nanoparticle channel (assumed layout)
        nps = np.array(frames[::2])
        # nuclei = np.array(frames[1::2])
        features = tp.batch(nps[:], diameter=filter_kernel, separation=min_sep)

        # Option to add a mass cut
        # features = features[features['raw_mass'] > 0]

        if len(features) == 0:
            features['particle'] = []
        if len(features) == 1:
            features['particle'] = 1
        elif len(features) > 1:
            # Clustering to eliminate maxima that are too close together
            positions = features[['x', 'y', 'frame']].values
            # Distance matrix is n-particles x n-particles in size - reckon it gives the interparticle separation
            # This gives the upper triangle of the distance matrix
            dist_mat = dist.pdist(positions)
            link_mat = hier.linkage(dist_mat)
            # fcluster assigns each of the particles in positions a cluster to which it belongs
            cluster_idx = hier.fcluster(link_mat, 5, criterion='distance')
            features['particle'] = cluster_idx

        n_parts = np.unique(features['particle'].values)
        values_clustered = []
        for j in n_parts:
            current = features[features['particle'] == j]
            if len(current) > z_extent:
                norm = np.sum(current['raw_mass'])
                x_av = np.mean(current['x'])
                y_av = np.mean(current['y'])
                # z-value is the intensity-weighted value
                z_av = np.sum(current['raw_mass'] * current['frame']) / norm
                a_total = np.sum(current['size'])
                # Geometric cut-off to particle size
                if a_total > 1:
                    values_clustered.append(
                        [x_av, y_av, z_av, a_total, norm, j])

        columns_clustered = ('x', 'y', 'z', 'a_total', 'intensity', 'particle')
        particles_clustered = pd.DataFrame(values_clustered,
                                           columns=columns_clustered)
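        # The snippet ends without using particles_clustered; a plausible
        # completion (an assumption, not in the original) accumulates the
        # per-file results and returns them:
        all_parts = pd.concat([all_parts, particles_clustered],
                              ignore_index=True)

    return all_parts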
Example 22
 def prepare(self, batch_params=None):
     directory = os.path.join(path, 'video', 'image_sequence')
     v = TrackpyImageSequence(os.path.join(directory, '*.png'))
     self.v = [tp.invert_image(v[i]) for i in range(2)]
     # mass depends on pixel dtype, which differs per reader
     minmass = self.v[0].max() * 2
     self.PARAMS = {'diameter': 11, 'minmass': minmass}
     if batch_params is not None:
         self.PARAMS.update(batch_params)
     self.expected = tp.batch(self.v,
                              engine='python',
                              meta=False,
                              **self.PARAMS)
Example 23
 def test_storage(self):
     STORE_NAME = 'temp_for_testing_{0}.h5'.format(_random_hash())
     if os.path.isfile(STORE_NAME):
         os.remove(STORE_NAME)
     try:
         s = self.storage_class(STORE_NAME)
     except IOError:
         raise nose.SkipTest('Cannot make an HDF5 file. Skipping')
     else:
         tp.batch(self.v[[0, 1]], output=s, engine='python', meta=False,
                  **self.PARAMS)
         self.assertEqual(len(s), 2)
         self.assertEqual(s.max_frame, 1)
         count_total_dumped = s.dump()['frame'].nunique()
         count_one_dumped = s.dump(1)['frame'].nunique()
         self.assertEqual(count_total_dumped, 2)
         self.assertEqual(count_one_dumped, 1)
         assert_frame_equal(s.dump().reset_index(drop=True), 
                            self.expected.reset_index(drop=True))
         assert_frame_equal(s[0], s.get(0))
         s.close()
         os.remove(STORE_NAME)
Example 24
    def batch_locate_particles(self, frames, diameter, minmass, filename='data.h5',
                               progress_listener=lambda *args: None):
        self.log.info('Starting particle tracking to file: ' + filename)
        self.log.info('Diameter: ' + str(diameter) + "; Minmass: " + str(minmass))

        def batches_gen(iterator, size):
            batch = []
            i = 0
            for item in iterator:
                batch.append(item)
                i += 1

                if i == size:
                    yield batch
                    batch = []
                    i = 0
            # yield the final partial batch, if any
            if batch:
                yield batch

        with trackpy.PandasHDFStore(filename, t_column='frame') as s:
            for batch in batches_gen(frames, 5):
                trackpy.batch(batch, diameter, minmass=minmass, invert=True,
                              output=s, engine='numba')
        self.log.info('Particles located')
        return self
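The features written above can be read back with the same store class; a minimal sketch:

    with trackpy.PandasHDFStore(filename, t_column='frame') as s:
        all_features = s.dump()  # one DataFrame containing every stored frame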
Example 25
 def test_storage(self):
     STORE_NAME = 'temp_for_testing.h5'
     if os.path.isfile(STORE_NAME):
         os.remove(STORE_NAME)
     try:
         s = self.storage_class(STORE_NAME)
     except IOError:
         raise nose.SkipTest('Cannot make an HDF5 file. Skipping')
     else:
         tp.batch(self.v[[0, 1]],
                  *self.PARAMS,
                  output=s,
                  engine='python',
                  meta=False)
         print(s.store.keys())
         print(dir(s.store.root))
         assert len(s) == 2
         assert s.max_frame == 1
         assert_frame_equal(s.dump().reset_index(drop=True),
                            self.expected.reset_index(drop=True))
         assert_frame_equal(s[0], s.get(0))
         s.close()
         os.remove(STORE_NAME)
Example 26
 def test_everything(self):
     """
     End to end test.
     Test the whole tracking pipeline from input file to particle and trap
     positions in the output file.
     """
     filename = "../../examples/data/test_example.twv"
     frames = pims.open(filename)
     times, laser_powers, traps = frames.get_all_tweezer_positions()
     features = tp.batch(frames, 25, minmass=1000, invert=False)
     tracks = tp.link_df(features, 15, memory=10)
     save_tracked_data_pandas(filename[:-4] + '_out.dat', frames, tracks, times, laser_powers, traps)
     with open(filename[:-4] + '_out.dat', 'r') as calculated_file:
         with open(filename[:-4] + '_expected.dat', 'r') as expected_file:
             for calculated, expected in zip(calculated_file, expected_file):
                 self.assertEqual(calculated, expected)
Example 27
def compute_traj(filename):

    vid = pims.Video('../test_video/' + filename)
    frames = as_grey(vid)

    midpoint = len(frames) / 2
    start = int(midpoint - 60)
    stop = int(midpoint + 60)

    f = tp.batch(frames[start:stop],
                 11,
                 invert=False,
                 minmass=160,
                 maxsize=3.0,
                 engine="numba")

    t = tp.link_df(f, 5, memory=3)

    t1 = tp.filter_stubs(t, 60)
    # Compare the number of particles in the unfiltered and filtered data.
    print('Before:', t['particle'].nunique())
    print('After:', t1['particle'].nunique())

    data = []
    for item in set(t1.particle):
        sub = t1[t1.particle == item]
        dvx = np.diff(sub.x)
        dvy = np.diff(sub.y)
        for x, y, dx, dy, frame, mass, size, ecc, signal, raw_mass, ep in \
                zip(sub.x[:-1], sub.y[:-1], dvx, dvy, sub.frame[:-1],
                    sub.mass[:-1], sub['size'][:-1], sub.ecc[:-1],
                    sub.signal[:-1], sub.raw_mass[:-1], sub.ep[:-1]):
            data.append({
                'dx': dx,
                'dy': dy,
                'x': x,
                'y': y,
                'frame': frame,
                'particle': item,
                'size': size,
                'ecc': ecc,
                'signal': signal,
                'mass': mass,
                'raw_mass': raw_mass,
                'ep': ep
            })
    df = pd.DataFrame(data)
    df.to_csv('../csvs/extract.csv')
Example 28
def Detect(estimateFeatureSize,
           CameraName,
           minMass=None,
           dynamicMinMass=False,
           Crop=False):
    ImagePath = os.path.join('data',
                             CaseName.split('-')[0],
                             CaseName.split('-')[1])
    Path = os.path.join(ImagePath, CameraName + '*.tif')
    frames = pims.open(Path)
    if Crop:
        frames = pims.process.crop(frames, ((400, 700), (0, 0)))
    print('Valid frames length is %d' % len(frames))
    # check start frame and end frame with total frames number
    if len(frames) != (endFrame - startFrame + 1):
        print('Invalid frames length')
        return
    # find five brightest
    if not minMass:
        f = tp.locate(frames[testFrame], estimateFeatureSize)
        mass = list(f['mass'])
        mass.sort()
        minMass = int(mass[-2] * 0.9 + mass[-1] * 0.1)
        print(minMass)
    #TopTen = np.argsort(f['mass'])[-5:]
    #TopTenArray = f['mass'][TopTen]
    # show mass histogram
    # show subpixel accuracy of the detection
    #minMass = list(TopTenArray)[0]
    f = tp.locate(frames[testFrame], estimateFeatureSize, minmass=minMass)
    plt.figure()
    tp.annotate(f, frames[testFrame])
    # run batch processing for all frames
    if dynamicMinMass:
        f = f[0:0]
        for i in range(len(frames)):
            f_ele = tp.locate(frames[i], estimateFeatureSize)
            mass = list(f_ele['mass'])
            mass.sort()
            minMass = int(mass[-2] * 0.9 + mass[-1] * 0.1)
            f_ele = tp.locate(frames[i], estimateFeatureSize, minmass=minMass)
            # DataFrame.append was removed in pandas 2.0; use concat instead
            f = pd.concat([f, f_ele])
    else:
        f = tp.batch(frames, estimateFeatureSize, minmass=minMass)
    return f, frames
Example 29
def cell_detect(file, var, opt):
    print('Detecting cells')
    # http://soft-matter.github.io/trackpy/v0.3.2/tutorial/walkthrough.html

    # load, run object detection and track (then clean up)
    raw_frames = pims.TiffStack(file, as_grey=True)  # load

    # only analyse n frames
    if var['frames_keep'] != 0:
        raw_frames = raw_frames[0:var['frames_keep']]

    # object detect
    cellsdf = tp.batch(raw_frames,
                       var['diameter'],
                       minmass=var['minFluroMass'],
                       separation=var['separation'],
                       engine='numba',
                       max_iterations=1,
                       characterize=False,
                       noise_size=var['noise_smooth'])

    # remove brightest objects
    cellsdf = cellsdf.drop(cellsdf[cellsdf.mass > var['maxFluroMass']].index)

    if opt['plot_inter_static']:
        annotate_args = {"vmin": 0, "vmax": 200}
        # Tweak styles
        plt.ion()
        plt.show()
        fig_dims = np.multiply(0.01, raw_frames[0].shape)
        mpl.rc('figure',
               figsize=(fig_dims[1].astype(int), fig_dims[0].astype(int)))
        mpl.rc('image', cmap='gray')

        # plot final particles chosen
        plt.figure()
        tp.annotate(cellsdf[cellsdf.frame == var['frame_plot']],
                    raw_frames[var['frame_plot']],
                    imshow_style=annotate_args)
        plt.title('Particles included in analysis'
                  '(at t=' + str(var['frame_plot']) + ')')
        plt.draw()
        plt.pause(0.001)

    return cellsdf, raw_frames
Example 30
def save_data(frame_dir, save_to_dir):
    """
    Takes a dirpath containing video frames and extracts the data.
    :param frame_dir: the directory containing the frames.
    :param save_to_dir: the directory path where we save our data, without the last '/'.
    :return:
    """
    print("Getting frame data from " + frame_dir)
    frames = pims.ImageSequence(frame_dir + '/' + FRAME_NAME + '*.jpg',
                                as_grey=True)
    try:
        data = tp.batch(frames[:-2],
                        PARTICLE_SIZE,
                        invert=True,
                        minmass=MIN_MASS,
                        percentile=PERCENTILE)
    except OSError:
        print("Could not process frames in " + frame_dir)
        return
    frame_dirname = os.path.basename(frame_dir)
    out_filepath = save_to_dir + '/' + frame_dirname + '.csv'
    print("Writing frame data to " + out_filepath)
    data.to_csv(out_filepath)
Example 31
def evaluate_features(video_name, particle_size, particle_minmass, start_frame,
                      length, noise):
    """
		The function that  runs Trackpy's feature detection algorithm on the arrays.

		Parameters:
			video_name (String): The name of the video to be evaluated stored in the Recordings folder.
			particle_size (int): The odd-number size of the feature to be detected by Trackpy.
			particle_minmass (double): The minimum feature brightness to filter using Trackpy's filtering functions.
			start_frame (int): The frame in the video from which to begin evaluation.
			length (int): The number of frames to evaluate.

		Returns:
			frame (DataFrame): The DataFrame of the features found in the video.
	"""
    video_frames = process_video(video_name)
    f = tp.batch(video_frames[start_frame:start_frame + length],
                 particle_size,
                 minmass=particle_minmass,
                 noise_size=noise)

    return f
Example 32
def get_data(outdir):
    """ Loads the output of the preprocessing steps for feature extraction
        Returns the formatted data
    """
    frames = pims.ImageSequence("../"+outdir+"/*tif")
    print(frames)

    # particle diameter
    diam = 11
    features = tp.batch(frames[:frames._count], diameter=diam, minmass=1, invert=True)
    # Link features in time: sigma_(max)
    search_range = diam-2
    # r, g, b images are loaded
    lframes = int(np.floor(frames._count/3))
    # default max 15% frame count
    imax = int(np.floor(15*lframes/100))
    t = tp.link_df(features, search_range, memory=imax)
    # default neighbour strategy: KDTree

    # Filter spurious trajectories
    # default min 10% frame count
    imin = int(np.floor(10*lframes/100))
    # if seen in imin
    t1 = tp.filter_stubs(t, imin)

    # Compare the number of particles in the unfiltered and filtered data.
    print("Unique number of particles (Before filtering):", t["particle"].nunique())
    print("(After):", t1["particle"].nunique())

    # export pandas data frame with filename being current date and time
    timestr = time.strftime("%Y%m%d-%H%M%S")
    # note: t1.size is the DataFrame's element count; the column is t1["size"]
    data = pd.DataFrame({"x": t1.x, "y": t1.y, "z": t1.frame,
                         "mass": t1.mass, "size": t1["size"], "ecc": t1.ecc,
                         "signal": t1.signal, "ep": t1.ep,
                         "particle": t1.particle})

    file_name = "../features_" + timestr + ".csv"
    print("Exporting %s" % (file_name))
    data.to_csv(file_name, sep="\t", encoding="utf-8")
    return data
Example 33
def FindSpots_tp(frames_np, diameter, minmass, separation, max_iterations,
                 DoPreProcessing, percentile, DoParallel=True):
    # run trackpy with the given parameters, serially or in parallel
    # (parallel is faster once the "long" loading time is paid)
    
    num_frames = frames_np.shape[0]

    if (not DoParallel) or (num_frames < 100):
        nd.logger.info("Find the particles - serial: starting....")
        output = tp.batch(frames_np, diameter, minmass=minmass,
                          separation=separation, max_iterations=max_iterations,
                          preprocess=DoPreProcessing, engine='auto',
                          percentile=percentile)

    else:
        num_cores = multiprocessing.cpu_count()
        nd.logger.info("Find the particles - parallel (Number of cores: %s): starting....", num_cores)
        inputs = range(num_frames)

        num_verbose = nd.handle_data.GetNumberVerbose()
        output_list = Parallel(n_jobs=num_cores, verbose=num_verbose)(
            delayed(tp.batch)(frames_np[loop_frame:loop_frame + 1, :, :].copy(),
                              diameter, minmass=minmass, separation=separation,
                              max_iterations=max_iterations,
                              preprocess=DoPreProcessing, engine='auto',
                              percentile=percentile)
            for loop_frame in inputs)

        empty_frame = []
        # parallel processing loses the frame number, so we add it back
        for frame_id,_ in enumerate(output_list):
            output_list[frame_id].frame = frame_id
            if len(output_list[frame_id]) == 0:
                empty_frame.append(frame_id)

        # go through empty frames (delete from the back, otherwise indexing fails)
        for frame_id in np.flip(empty_frame):
            del output_list[frame_id]

        # make list of pandas to one big pandas
        output = pd.concat(output_list)
        
        # reset the index which starts at every frame again
        output = output.reset_index(drop=True)

    nd.logger.info("Find the particles - finished")
      
    return output
Example 34
def nd2msd(nd_fh):
    frames = pims.ND2_Reader(nd_fh)
    logging.info('number of dimensions = %d' % len(np.shape(frames)))
    if len(np.shape(frames))==4:
        frames = average_z(frames)
    threshold=np.percentile(frames,75)
    f_batch = tp.batch(frames,diameter=11,threshold=threshold)

    t = tp.link_df(f_batch, search_range=11, memory=3)
    t_flt = tp.filter_stubs(t, 3*int(len(frames)/4))
    try:
        d = tp.compute_drift(t_flt)
        t_cor = tp.subtract_drift(t_flt, d)
    except Exception:
        t_cor = t_flt
        logging.info("drift correction failed; using uncorrected trajectories")
    # plt.figure()
    # tp.plot_traj(t_flt)
    # plt.figure()
    # d.plot()
    imsd=tp.imsd(t_cor,0.1,0.2, max_lagtime=100, statistic='msd')
    emsd=tp.emsd(t_cor,0.1,0.2, max_lagtime=100)
    return imsd,emsd
Example 35
def tracking(video):
    # print "running tracking"
    pimsframes = pims.Video(video, as_grey = True)
    fgbg = cv2.BackgroundSubtractorMOG()
    framesmask = []
    framecount = 0
    blurredframes = []
    # HACK to flip if has space in name. #TODO get all videos correctly alligned...
    if " " in video:
        pimsframes = [p[:, ::-1] for p in pimsframes]

    pimsframes = [frame[:,400:] for frame in pimsframes]
    for frame in pimsframes:
        # frame = cv2.GaussianBlur(frame,(9,9),0)
        # frame = cv2.medianBlur(frame, 7)
        # if align remove
        frame = cv2.GaussianBlur(frame,(11,11),7)
        frame = cv2.medianBlur(frame, 3)
        fgmask = fgbg.apply(frame, learningRate=1.0/history)
        framesmask.append(fgmask)
        framecount += 1
        blurredframes.append(frame)

    background_sub = [m * frame for m,frame in zip(framesmask, pimsframes)]
    if False:
        for i in range(100):
            cv2.imshow("asdf", background_sub[i])
            cv2.imshow("mask", framesmask[i])
            cv2.imshow("orig", pimsframes[i])
            cv2.imshow("blur", blurredframes[i])
            cv2.waitKey(0)
    # for i, f in enumerate(framesmask):
    #     half_show("asdf", f)
    #     cv2.waitKey(0)
    #     print i
    cells = []
    track = []

    to_track = background_sub
    minmass = 3000

    f = tp.batch(to_track[:], 11, minmass=minmass, invert=False, noise_size=3)
    # for j in range(20,100):
    #     f = tp.locate(to_track[j], 11, invert=False, minmass = minmass, noise_size=3)
    #     plt.figure(1)
    #     tp.annotate(f, to_track[j])
    #     plt.show()
    # ipdb.set_trace()
    print "linking"
    try:
        # t = tp.link_df(f, 100, memory=3)
        t = tp.link_df(f, 100, memory=1)

    except Exception:
        print "FAILED on", video
        return None
    print "done"
    # plt.figure(2)
    # tp.plot_traj(t)
    # plt.show()
    return t
Example 36
v = pims.ImageSequence(impath)
# take reader that provides uint8!
assert np.issubdtype(v.dtype, np.uint8)
v0 = tp.invert_image(v[0])
v0_bp = tp.bandpass(v0, lshort=1, llong=9)
expected_find = tp.grey_dilation(v0, separation=9)
expected_find_bandpass = tp.grey_dilation(v0_bp, separation=9)
expected_refine = tp.refine_com(v0, v0_bp, radius=4,
                                coords=expected_find_bandpass)
expected_refine = expected_refine[expected_refine['mass'] >= 140]
expected_refine_coords = expected_refine[pos_columns].values
expected_locate = tp.locate(v0, diameter=9, minmass=140)
expected_locate_coords = expected_locate[pos_columns].values
df = tp.locate(v0, diameter=9)
df = df[(df['x'] < 64) & (df['y'] < 64)]
expected_characterize = df[pos_columns + char_columns].values

f = tp.batch(tp.invert_image(v), 9, minmass=140)
# the last condition presumably means y > 240 (x > 240 would be redundant)
f_crop = f[(f['x'] < 320) & (f['x'] > 280) & (f['y'] < 280) & (f['y'] > 240)]
f_linked = tp.link(f_crop, search_range=5, memory=0)
f_linked_memory = tp.link(f_crop, search_range=5, memory=2)
link_coords = f_linked[pos_columns + ['frame']].values
expected_linked = f_linked['particle'].values
expected_linked_memory = f_linked_memory['particle'].values

np.savez_compressed(npzpath, expected_find, expected_find_bandpass,
                    expected_refine_coords, expected_locate_coords,
                    link_coords, expected_linked, expected_linked_memory,
                    expected_characterize)
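Since the arrays are passed to np.savez_compressed positionally, they are stored under the default keys arr_0 through arr_7 in argument order; a minimal read-back sketch:

    data = np.load(npzpath)
    expected_find = data['arr_0']
    expected_characterize = data['arr_7']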
Example 37
import trackpy as tp
from os import path
from pimsviewer import Viewer, AnnotatePlugin
from pims import pipeline

@pipeline
def as_grey(frame):
    return 0.2125 * frame[:, :, 0] + 0.7154 * frame[:, :, 1] + 0.0721 * frame[:, :, 2]

filename = path.join(path.dirname(path.realpath(__file__)), '../screenshot.png')

viewer = Viewer(filename)

f = tp.batch(as_grey(viewer.reader), diameter=15)
plugin = AnnotatePlugin(viewer, f)

viewer.run()
Example 38
 def prepare(self):
     directory = os.path.join(path, 'video', 'image_sequence')
     self.v = tp.ImageSequence(directory)
     self.PARAMS = (11, 3000)
     self.expected = tp.batch(self.v[[0, 1]], *self.PARAMS,
                              engine='python', meta=False)
Example 40
def get_data(outdir, red, green, blue, diam=11):
    ''' Loads the output of the preprocessing steps for particle extraction
        Returns the formatted data
    '''
    frames = pims.ImageSequence("../" + outdir + "/*tif")
    print(frames)

    # particle diameter
    features = tp.batch(frames[:frames._count], diameter=diam,
                        minmass=1, invert=True)

    # Link features in time
    search_range = diam - 2  # sigma_(max)

    lframes = int(np.floor(frames._count / 3))  # r, g, b images are loaded
    imax = int(np.floor(15 * lframes / 100))  # default max 15% frame count
    t = tp.link_df(features, search_range, memory=imax)
    # default neighbour strategy: KDTree

    # Filter spurious trajectories
    imin = int(np.floor(10 * lframes / 100))  # default min 10% frame count
    t1 = tp.filter_stubs(t, imin)  # if seen in imin

    # Compare the number of particles in the unfiltered and filtered data
    print('Unique number of particles (Before filtering):',
          t['particle'].nunique())
    print('(After):', t1['particle'].nunique())

    # export pandas data frame with filename being current date and time
    timestr = time.strftime("%Y%m%d-%H%M%S")

    # note: t1.size is the DataFrame's element count; the column is t1['size']
    data = pd.DataFrame({'x': t1.x, 'y': t1.y, 'z': t1.frame,
                         'mass': t1.mass, 'size': t1['size'], 'ecc': t1.ecc,
                         'signal': t1.signal, 'ep': t1.ep,
                         'particle': t1.particle})

    # format the dataframe / original indexing
    data["n"] = np.arange(len(data))

    print("Sorting dataframe by time...")
    data = data.sort_values(by='z', ascending=True)

    print("Extracting pixel values of particles...")
    r, g, b = get_val(red, 2, data), get_val(green, 1,
                                             data), get_val(blue, 0, data)

    print("Normalising rgb values to relative quantities...")
    r1, g1, b1 = np.array(r), np.array(g), np.array(b)
    r = (r1 - np.min(r1)) * (65535 / np.max(r1))
    g = (g1 - np.min(g1)) * (65535 / np.max(g1))
    b = (b1 - np.min(b1)) * (65535 / np.max(b1))

    print("Adding (r,g,b) values as columns to dataframe...")
    strname, px_val = ["r", "g", "b"], [r, g, b]
    add_arrays_df(strname, px_val, data)

    # sort back to original state
    data = data.sort_values(by='n', ascending=True)

    # remove the previously created column
    data.drop('n', axis=1, inplace=True)

    # format df with rgb values to uint8
    data = format_df(data)

    print "Dataframe summary:\n", data.describe()
    file_name = "../particles_" + timestr + ".csv"
    print "Exporting %s" % (file_name)
    data.to_csv(file_name, sep='\t', encoding='utf-8')

    return data
Example 41
def frames2coords(frames,out_fh,
                  params_locate,params_msd,params_link_df={'search_range':20,},
                  mass_cutoff=0.5,size_cutoff=0.5,ecc_cutoff=0.5,
                    filter_stubs=True,flt_mass_size=True,flt_incomplete_trjs=True,
                    force=False,test=False):
    dns=['f_batch','t','t1','t2']
    dn2dp={dn:f'{out_fh}.{dn}.tsv' for dn in dns}
    dn2df={}
    if not exists(dn2dp['t2']) or force:
        if not exists(dn2dp['t']) or force:
            dn2df['f_batch']=tp.batch(frames,engine='numba',**params_locate)
            dn2df['t']=tp.link_df(dn2df['f_batch'], **params_link_df)
            print(params_link_df)
            dn2df['f_batch'].to_csv(dn2dp['f_batch'],sep='\t')
            dn2df['t'].to_csv(dn2dp['t'],sep='\t')
        else:
            dn2df['t']=pd.read_csv(dn2dp['t'])
        max_lagtime_stubs=params_msd["max_lagtime"]*params_msd["fps"]
        if filter_stubs:
            dn2df['t1'] = tp.filter_stubs(dn2df['t'], max_lagtime_stubs*1.25)
            logging.info('filter_stubs: particle counts: %s to %s' % (dn2df['t']['particle'].nunique(),dn2df['t1']['particle'].nunique()))
            if dn2df['t1']['particle'].nunique() == 0:
                logging.error('filter_stubs: particle counts =0; using less stringent conditions')
                dn2df['t1'] = tp.filter_stubs(dn2df['t'], max_lagtime_stubs*1)
        else:
            dn2df['t1'] = dn2df['t'].copy()

        if test:        
            fig=plt.figure()
            ax=plt.subplot(111)
            tp.mass_size(dn2df['t1'].groupby('particle').mean(),ax=ax);
            plt.tight_layout()
            plt.savefig('%s.mass_size.svg' % out_fh,format='svg')        
        if flt_mass_size:
            dn2df['t2'] = dn2df['t1'][((dn2df['t1']['mass'] > dn2df['t1']['mass'].quantile(mass_cutoff)) & (dn2df['t1']['size'] < dn2df['t1']['size'].quantile(size_cutoff)) &
                     (dn2df['t1']['ecc'] < ecc_cutoff))]
            logging.info('filter_mass_size: particle counts: %s to %s' % (dn2df['t1']['particle'].nunique(),dn2df['t2']['particle'].nunique()))
            if len(dn2df['t2']) == 0:
                dn2df['t2'] = dn2df['t1'].copy()
                logging.warning('filter_mass_size produced 0 particles; using t2=t1.copy()')
        else:
            dn2df['t2'] = dn2df['t1'].copy()
        if test:        
            fig=plt.figure()
            ax=plt.subplot(111)
            tp.mass_size(dn2df['t2'].groupby('particle').mean(),ax=ax);
            plt.tight_layout()
            plt.savefig('%s.mass_size_post_filtering.svg' % out_fh,format='svg')        
        if flt_incomplete_trjs:
            dn2df['t2']=dn2df['t2'].reset_index()
            vals=pd.DataFrame(dn2df['t2']['particle'].value_counts())
            partis=[i for i in vals.index if vals.loc[i,'particle']>=int(vals.max())*0.95 ]
            dn2df['t2']=dn2df['t2'].loc[[i for i in dn2df['t2'].index if (dn2df['t2'].loc[i,'particle'] in partis)],:]
        dn2df['t2'].to_csv(dn2dp['t2'],sep='\t')
    else:
        dn2df['t2']=pd.read_csv(dn2dp['t2'],sep='\t')
    if test:
        for traj in ['t','t1','t2']:
            ax=plot_traj(frames[-1],traj=dn2df[traj])
        logging.info('getting plots hist')
        cols=['mass','size','ecc','signal','raw_mass','ep']
        fig=plt.figure()
        ax=plt.subplot(111)
        _=dn2df['t2'].loc[:,cols].hist(ax=ax)        
    return dn2df['t2']