Example #1
    def test_save_trks(self):
        X = _get_image(30, 30)
        y = np.random.randint(low=0, high=10, size=X.shape)
        lineage = [dict()]

        try:
            tempdir = tempfile.mkdtemp()  # create dir
            with pytest.raises(ValueError):
                badfilename = os.path.join(tempdir, 'x.trk')
                utils.save_trks(badfilename, lineage, X, y)

            filename = os.path.join(tempdir, 'x.trks')
            utils.save_trks(filename, lineage, X, y)
            assert os.path.isfile(filename)

            # test saved tracks can be loaded
            loaded = utils.load_trks(filename)
            assert loaded['lineages'] == lineage
            np.testing.assert_array_equal(X, loaded['X'])
            np.testing.assert_array_equal(y, loaded['y'])

        finally:
            try:
                shutil.rmtree(tempdir)  # delete directory
            except OSError as exc:
                if exc.errno != errno.ENOENT:  # no such file or directory
                    raise  # re-raise exception
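For reference, the round trip exercised by this test can be written as a short standalone sketch. The import path below is an assumption (it presumes save_trks and load_trks come from deepcell_tracking.utils, the module aliased as utils in the test); the array shapes mirror the test and are otherwise arbitrary.

# Minimal save/load round trip, mirroring test_save_trks above.
# Assumption: save_trks/load_trks are importable from deepcell_tracking.utils.
import numpy as np
from deepcell_tracking.utils import load_trks, save_trks

X = np.random.random((30, 30, 1))            # raw data, shaped like the test image
y = np.random.randint(0, 10, size=X.shape)   # integer labels matching X's shape
lineages = [dict()]                          # one lineage dict per movie

save_trks('example.trks', lineages, X, y)    # the filename must end in .trks,
                                             # otherwise a ValueError is raised

loaded = load_trks('example.trks')           # returns {'lineages', 'X', 'y'}
assert loaded['lineages'] == lineages
np.testing.assert_array_equal(loaded['X'], X)
np.testing.assert_array_equal(loaded['y'], y)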
Example #2
def benchmark_division_performance(trk_gt, trk_res, path_gt, path_res):
    """Compare two related .trk files (one being the GT of the other) and meaasure
    performance on the the divisions in the GT file. This function produces two .txt
    documents as a by-product (ISBI-style lineage documents)

    # TODO: there should be an option to not write the files but compare in memory

    Args:
        trk_gt (path): Path to the ground truth .trk file.
        trk_res (path): Path to the predicted results .trk file.
        path_gt (path): Desired destination path for the GT ISBI-style .txt file.
        path_res (path): Desired destination path for the result ISBI-style .txt file.

    Returns:
        dict: Dictionary of all division statistics.
    """
    # Identify nodes with parent attribute
    # Load both .trk
    trks = load_trks(trk_gt)
    lineage_gt, _, y_gt = trks['lineages'][0], trks['X'], trks['y']
    trks = load_trks(trk_res)
    lineage_res, _, y_res = trks['lineages'][0], trks['X'], trks['y']

    # Produce ISBI style text doc to work with
    trk_to_isbi(lineage_gt, path_gt)
    trk_to_isbi(lineage_res, path_res)

    # Match up labels in GT to Results to allow for direct comparisons
    cells_gt, cells_res = match_nodes(y_gt, y_res)

    if len(np.unique(cells_res)) < len(np.unique(cells_gt)):
        node_key = {r: g for g, r in zip(cells_gt, cells_res)}
        # node_key maps res nodes onto gt nodes, so it is applied to the res graph
        G_res = txt_to_graph(path_res, node_key=node_key)
        G_gt = txt_to_graph(path_gt)
        div_results = classify_divisions(G_gt, G_res)
    else:
        node_key = {g: r for g, r in zip(cells_gt, cells_res)}
        G_res = txt_to_graph(path_res)
        G_gt = txt_to_graph(path_gt, node_key=node_key)
        div_results = classify_divisions(G_gt, G_res)

    return div_results
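A hedged usage sketch for benchmark_division_performance. The file paths are placeholders, and the import location is an assumption (the helper sits alongside load_trks, trk_to_isbi, txt_to_graph, and classify_divisions, which in deepcell-tracking live with the ISBI utilities); adjust it to your package layout.

# Hypothetical call; paths are placeholders and the import path is assumed.
from deepcell_tracking.isbi_utils import benchmark_division_performance

stats = benchmark_division_performance(
    trk_gt='data/movie_gt.trk',          # ground-truth .trk
    trk_res='data/movie_pred.trk',       # predicted .trk for the same movie
    path_gt='results/movie_gt.txt',      # ISBI-style lineage file written as a by-product
    path_res='results/movie_pred.txt')

# `stats` is whatever classify_divisions(G_gt, G_res) returned.
for name, value in stats.items():
    print(name, value)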
Example #3
    def __init__(self, path=None, tracked_data=None,
                 appearance_dim=32, distance_threshold=64):
        if tracked_data:
            training_data = tracked_data
        elif path:
            training_data = load_trks(path)
        else:
            raise ValueError('One of `tracked_data` or `path` is required')
        self.X = training_data['X'].astype('float32')
        self.y = training_data['y'].astype('int32')
        self.lineages = training_data['lineages']
        if not len(self.X) == len(self.y) == len(self.lineages):
            raise ValueError(
                'The data do not share the same batch size. '
                'Please make sure you are using a valid .trks file')
        self.appearance_dim = appearance_dim
        self.distance_threshold = distance_threshold

        # Correct lineages and remove bad batches
        self._correct_lineages()

        # Create feature dictionaries
        features_dict = self._get_features()

        self.appearances = features_dict['appearances']
        self.morphologies = features_dict['morphologies']
        self.centroids = features_dict['centroids']

        # Convert adj matrices to sparse
        self.adj_matrices = self._get_sparse(
            features_dict['adj_matrix'])
        self.norm_adj_matrices = self._get_sparse(
            normalize_adj_matrix(features_dict['adj_matrix']))
        self.temporal_adj_matrices = self._get_sparse(
            features_dict['temporal_adj_matrix'])

        self.mask = features_dict['mask']
        self.track_length = features_dict['track_length']
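A sketch of how the class this __init__ belongs to might be constructed. The class name Track and its import path are assumptions (neither appears in the snippet); the constructor arguments and attributes come from the code above.

# Hypothetical construction of the data class whose __init__ is shown above.
# `Track` and both import paths are assumed names, not taken from the snippet.
from deepcell_tracking.utils import load_trks
from deepcell_tracking.data_utils import Track  # assumed location

# Either load directly from a .trks file...
data = Track(path='train.trks', appearance_dim=32, distance_threshold=64)

# ...or pass an already-loaded dictionary with 'X', 'y', and 'lineages' keys.
data = Track(tracked_data=load_trks('train.trks'))

# After construction, the attributes built in __init__ are available, e.g.
# data.appearances, data.centroids, data.adj_matrices, data.norm_adj_matrices,
# data.temporal_adj_matrices, data.mask, and data.track_length.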
Example #4
    def test_track_cells(self):
        frames = 10
        track_length = 3
        labels_per_frame = 3

        # TODO: test detected divisions
        # TODO: test creating new track

        # TODO: Fix for channels_first
        for data_format in ('channels_last', ):  # 'channels_first'):

            y1 = get_annotated_movie(img_size=256,
                                     labels_per_frame=labels_per_frame,
                                     frames=frames,
                                     mov_type='sequential',
                                     seed=1,
                                     data_format=data_format)
            y2 = get_annotated_movie(img_size=256,
                                     labels_per_frame=labels_per_frame * 2,
                                     frames=frames,
                                     mov_type='sequential',
                                     seed=2,
                                     data_format=data_format)
            y3 = get_annotated_movie(img_size=256,
                                     labels_per_frame=labels_per_frame,
                                     frames=frames,
                                     mov_type='sequential',
                                     seed=3,
                                     data_format=data_format)

            y = np.concatenate((y1, y2, y3))

            x = np.random.random(y.shape)

            tracker = tracking.CellTracker(x,
                                           y,
                                           tracking_model=DummyModel(),
                                           neighborhood_encoder=DummyEncoder(),
                                           track_length=track_length,
                                           data_format=data_format)

            tracker.track_cells()

            # test tracker.dataframe
            df = tracker.dataframe(cell_type='test-value')
            assert isinstance(df, pd.DataFrame)
            assert 'cell_type' in df.columns  # pylint: disable=E1135

            # test incorrect values in tracker.dataframe
            with pytest.raises(ValueError):
                tracker.dataframe(bad_value=-1)

            try:
                # test tracker.postprocess
                tempdir = tempfile.mkdtemp()  # create dir
                path = os.path.join(tempdir, 'postprocess.xyz')
                tracker.postprocess(filename=path)
                post_saved_path = os.path.join(tempdir, 'postprocess.trk')
                assert os.path.isfile(post_saved_path)

                # test tracker.dump
                path = os.path.join(tempdir, 'test.xyz')
                tracker.dump(path)
                dump_saved_path = os.path.join(tempdir, 'test.trk')
                assert os.path.isfile(dump_saved_path)

                # utility tests for loading trk files
                # TODO: move utility tests into utils_test.py

                # test trk_folder_to_trks
                utils.trk_folder_to_trks(tempdir,
                                         os.path.join(tempdir, 'all.trks'))
                assert os.path.isfile(os.path.join(tempdir, 'all.trks'))

                # test load_trks
                data = utils.load_trks(post_saved_path)
                assert isinstance(data['lineages'], list)
                assert all(isinstance(d, dict) for d in data['lineages'])
                np.testing.assert_equal(data['X'], tracker.X)
                np.testing.assert_equal(data['y'], tracker.y_tracked)
                # load trks instead of trk
                data = utils.load_trks(os.path.join(tempdir, 'all.trks'))

                # test trks_stats
                utils.trks_stats(os.path.join(tempdir, 'test.trk'))
            finally:
                try:
                    shutil.rmtree(tempdir)  # delete directory
                except OSError as exc:
                    if exc.errno != errno.ENOENT:  # no such file or directory
                        raise  # re-raise exception
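Stripped of the test scaffolding, the workflow this test exercises reduces to the calls below. The models are placeholders (the test substitutes DummyModel and DummyEncoder), the movie shapes follow the channels_last convention used above, and the import assumes the tracking module is importable as deepcell_tracking.tracking; everything else mirrors the snippet.

# Condensed sketch of the tracking workflow exercised by the test above.
# Trained models must be supplied where the test used dummy objects.
import numpy as np
from deepcell_tracking import tracking

y = np.zeros((10, 256, 256, 1), dtype='int32')   # replace with a real label movie
x = np.random.random(y.shape)                    # matching raw movie

tracking_model = ...          # placeholder: a trained tracking model
neighborhood_encoder = ...    # placeholder: a trained neighborhood encoder

tracker = tracking.CellTracker(x, y,
                               tracking_model=tracking_model,
                               neighborhood_encoder=neighborhood_encoder,
                               track_length=3,
                               data_format='channels_last')
tracker.track_cells()

df = tracker.dataframe(cell_type='my-cell-type')         # per-cell summary table
tracker.postprocess(filename='movie_postprocessed.trk')  # clean up and save a .trk
tracker.dump('movie_raw_tracks.trk')                     # save unprocessed tracks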
Example #5
    def test_track_cells(self):
        length = 128
        frames = 5
        track_length = 2

        features = ['appearance', 'neighborhood', 'regionprop', 'distance']

        # TODO: Fix for channels_first
        for data_format in ('channels_last', ):  # 'channels_first'):

            x, y = _get_dummy_tracking_data(length,
                                            frames=frames,
                                            data_format=data_format)

            tracker = tracking.CellTracker(x,
                                           y,
                                           model=DummyModel(),
                                           track_length=track_length,
                                           data_format=data_format,
                                           features=features)

            tracker.track_cells()

            # test tracker.dataframe
            df = tracker.dataframe(cell_type='test-value')
            assert isinstance(df, pd.DataFrame)
            assert 'cell_type' in df.columns  # pylint: disable=E1135

            # test incorrect values in tracker.dataframe
            with pytest.raises(ValueError):
                tracker.dataframe(bad_value=-1)

            try:
                # test tracker.postprocess
                tempdir = tempfile.mkdtemp()  # create dir
                path = os.path.join(tempdir, 'postprocess.xyz')
                tracker.postprocess(filename=path)
                post_saved_path = os.path.join(tempdir, 'postprocess.trk')
                assert os.path.isfile(post_saved_path)

                # test tracker.dump
                path = os.path.join(tempdir, 'test.xyz')
                tracker.dump(path)
                dump_saved_path = os.path.join(tempdir, 'test.trk')
                assert os.path.isfile(dump_saved_path)

                # utility tests for loading trk files
                # TODO: move utility tests into utils_test.py

                # test trk_folder_to_trks
                utils.trk_folder_to_trks(tempdir,
                                         os.path.join(tempdir, 'all.trks'))
                assert os.path.isfile(os.path.join(tempdir, 'all.trks'))

                # test load_trks
                data = utils.load_trks(post_saved_path)
                assert isinstance(data['lineages'], list)
                assert all(isinstance(d, dict) for d in data['lineages'])
                np.testing.assert_equal(data['X'], tracker.x)
                np.testing.assert_equal(data['y'], tracker.y_tracked)
                # load trks instead of trk
                data = utils.load_trks(os.path.join(tempdir, 'all.trks'))

                # test trks_stats
                utils.trks_stats(os.path.join(tempdir, 'test.trk'))
            finally:
                try:
                    shutil.rmtree(tempdir)  # delete directory
                except OSError as exc:
                    if exc.errno != errno.ENOENT:  # no such file or directory
                        raise  # re-raise exception