def test_two_isolated_steppers_one_gapped(self):
    N = 5
    Y = 25
    # Begin second feature one frame later than the first,
    # so the particle labeling (0, 1) is established and not arbitrary.
    a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                   'frame': np.arange(N)})
    a = a.drop(3).reset_index(drop=True)
    b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
                   'frame': np.arange(1, N)})
    f = pd.concat([a, b])
    expected = f.copy()
    expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]),
                                           np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    expected.reset_index(drop=True, inplace=True)
    actual = self.link(f, 5)
    assert_traj_equal(actual, expected)
    # link_df_iter() tests not performed, because hash_size is
    # not knowable from the first frame alone.

    # Sort rows by frame (normal use)
    actual = self.link(pandas_sort(f, 'frame'), 5)
    assert_traj_equal(actual, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link(f1, 5)
    assert_traj_equal(actual, expected)

def test_topn(self):
    self.check_skip()
    L = 21
    dims = (L, L + 2)  # avoid square images in tests
    cols = ['y', 'x']
    PRECISION = 0.1

    # top 2
    pos1 = np.array([7, 7])
    pos2 = np.array([14, 14])
    pos3 = np.array([7, 14])
    image = np.ones(dims, dtype='uint8')
    draw_point(image, pos1, 100)
    draw_point(image, pos2, 90)
    draw_point(image, pos3, 80)
    actual = tp.locate(image, 5, 1, topn=2, preprocess=False,
                       engine=self.engine)[cols]
    actual = pandas_sort(actual, ['x', 'y'])  # sort for reliable comparison
    expected = pandas_sort(DataFrame([pos1, pos2], columns=cols), ['x', 'y'])
    assert_allclose(actual, expected, atol=PRECISION)

    # top 1
    actual = tp.locate(image, 5, 1, topn=1, preprocess=False,
                       engine=self.engine)[cols]
    actual = pandas_sort(actual, ['x', 'y'])  # sort for reliable comparison
    expected = pandas_sort(DataFrame([pos1], columns=cols), ['x', 'y'])
    assert_allclose(actual, expected, atol=PRECISION)

def test_two_isolated_steppers_one_gapped(self):
    N = 5
    Y = 25
    # Begin second feature one frame later than the first,
    # so the particle labeling (0, 1) is established and not arbitrary.
    a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                   'frame': np.arange(N)})
    a = a.drop(3).reset_index(drop=True)
    b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
                   'frame': np.arange(1, N)})
    f = pd.concat([a, b])
    expected = f.copy()
    expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]),
                                           np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    expected.reset_index(drop=True, inplace=True)
    actual = self.link_df(f, 5)
    assert_traj_equal(actual, expected)
    # link_df_iter() tests not performed, because hash_size is
    # not knowable from the first frame alone.

    # Sort rows by frame (normal use)
    actual = self.link_df(pandas_sort(f, 'frame'), 5)
    assert_traj_equal(actual, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link_df(f1, 5)
    assert_traj_equal(actual, expected)

def test_isolated_continuous_random_walks(self):
    # Two 2D random walks
    np.random.seed(0)
    N = 30
    Y = 250
    M = 20  # margin, because negative values raise OutOfHash
    a = DataFrame({'x': M + random_walk(N), 'y': M + random_walk(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': M + random_walk(N - 1), 'y': M + Y + random_walk(N - 1),
                   'frame': np.arange(1, N)})
    f = pd.concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_frame_equal(actual, expected)
    actual_iter = self.link_df_iter(f, 5, hash_size=(2*M, Y + 2*M))
    assert_frame_equal(actual_iter, expected)

    # Many 2D random walks
    np.random.seed(0)
    initial_positions = [(100, 100), (200, 100), (100, 200), (200, 200)]
    import itertools
    c = itertools.count()

    def walk(x, y):
        i = next(c)
        return DataFrame({'x': x + random_walk(N - i),
                          'y': y + random_walk(N - i),
                          'frame': np.arange(i, N)})

    f = pd.concat([walk(*pos) for pos in initial_positions])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate(
        [i*np.ones(N - i) for i in range(len(initial_positions))])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_frame_equal(actual, expected)
    actual_iter = self.link_df_iter(f, 5, hash_size=(200 + M, 200 + M))
    assert_frame_equal(actual_iter, expected)

def test_memory_on_one_gap(self):
    N = 5
    Y = 2
    # Begin second feature one frame later than the first, so the particle
    # labeling (0, 1) is established and not arbitrary.
    a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
                   'frame': np.arange(1, N)})
    a = a.drop(3).reset_index(drop=True)
    f = pd.concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.array([0, 0, 0, 0]),
                                           np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    expected.reset_index(drop=True, inplace=True)
    actual = self.link_df(f, 5, memory=1)
    assert_traj_equal(actual, expected)

    # Sort rows by frame (normal use)
    actual = self.link_df(pandas_sort(f, 'frame'), 5, memory=1)
    assert_traj_equal(actual, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link_df(f1, 5, memory=1)
    assert_traj_equal(actual, expected)

def test_two_isolated_steppers(self):
    N = 5
    Y = 25
    # Begin second feature one frame later than the first, so the particle
    # labeling (0, 1) is established and not arbitrary.
    a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
                   'frame': np.arange(1, N)})
    f = pd.concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_frame_equal(actual, expected)
    actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
    assert_frame_equal(actual_iter, expected)

    # Sort rows by frame (normal use)
    actual = self.link_df(pandas_sort(f, 'frame'), 5)
    assert_frame_equal(actual, expected)
    actual_iter = self.link_df_iter(pandas_sort(f, 'frame'), 5,
                                    hash_size=(50, 50))
    assert_frame_equal(actual_iter, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link_df(f1, 5)
    assert_frame_equal(actual, expected)
    actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
    assert_frame_equal(actual_iter, expected)

def test_memory_on_one_gap(self):
    N = 5
    Y = 2
    # Begin second feature one frame later than the first, so the particle
    # labeling (0, 1) is established and not arbitrary.
    a = DataFrame({'x': np.arange(N),
                   'y': np.ones(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': np.arange(1, N),
                   'y': Y + np.ones(N - 1),
                   'frame': np.arange(1, N)})
    a = a.drop(3).reset_index(drop=True)
    f = pandas_concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate(
        [np.array([0, 0, 0, 0]), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    expected.reset_index(drop=True, inplace=True)
    actual = self.link(f, 5, memory=1)
    assert_traj_equal(actual, expected)

    # Sort rows by frame (normal use)
    actual = self.link(pandas_sort(f, 'frame'), 5, memory=1)
    assert_traj_equal(actual, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link(f1, 5, memory=1)
    assert_traj_equal(actual, expected)

def assert_traj_equal(actual, expected, pos_atol=1):
    assert_equal(len(actual), len(expected))
    actual = pandas_sort(actual, 'frame').reset_index(drop=True)
    expected = pandas_sort(expected, 'frame').reset_index(drop=True)
    actual_order = []
    for frame_no in expected['frame'].unique():
        actual_f = actual[actual['frame'] == frame_no]
        expected_f = expected[expected['frame'] == frame_no]
        assert_equal(len(actual_f), len(expected_f),
                     err_msg='Actual and expected numbers of features '
                             'differ in frame %i' % frame_no)
        # Match actual features to expected features by position.
        tree = cKDTree(actual_f[['y', 'x']].values)
        devs, argsort = tree.query(expected_f[['y', 'x']].values)
        assert_allclose(devs, 0., atol=pos_atol)
        actual_order.extend(actual_f.index[argsort].tolist())

    # Reorder the actual features to line up with the expected ones, then
    # check that rows are grouped into the same trajectories, regardless of
    # the absolute particle labels.
    actual = actual.loc[actual_order].reset_index(drop=True)
    for p_actual in actual.particle.unique():
        actual_ind = actual.index[actual['particle'] == p_actual]
        p_expected = expected.loc[actual_ind[0], 'particle']
        expected_ind = expected.index[expected['particle'] == p_expected]
        assert_array_equal(actual_ind, expected_ind,
                           err_msg='Actual and expected linking results '
                                   'differ for actual particle %i/'
                                   'expected particle %i'
                                   % (p_actual, p_expected))

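# Hedged usage sketch (not part of the original suite): assert_traj_equal
# ignores the absolute particle labels and only requires that rows are grouped
# into the same trajectories. The toy frame and the helper name below are
# illustrative.
def _demo_assert_traj_equal():
    f = DataFrame({'x': [0., 1., 0., 1.],
                   'y': [0., 5., 0., 5.],
                   'frame': [0, 0, 1, 1]})
    expected = f.copy()
    expected['particle'] = [0, 1, 0, 1]
    relabeled = f.copy()
    relabeled['particle'] = [1, 0, 1, 0]   # same trajectories, swapped labels
    assert_traj_equal(relabeled, expected)  # passes
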
def test_isolated_continuous_random_walks(self):
    # Two 2D random walks
    np.random.seed(0)
    N = 30
    Y = 250
    M = 20  # margin, because negative values raise OutOfHash
    a = DataFrame({'x': M + random_walk(N), 'y': M + random_walk(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': M + random_walk(N - 1), 'y': M + Y + random_walk(N - 1),
                   'frame': np.arange(1, N)})
    f = pandas_concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_traj_equal(actual, expected)
    actual_iter = self.link_df_iter(f, 5, hash_size=(2*M, Y + 2*M))
    assert_traj_equal(actual_iter, expected)

    # Many 2D random walks
    np.random.seed(0)
    initial_positions = [(100, 100), (200, 100), (100, 200), (200, 200)]
    import itertools
    c = itertools.count()

    def walk(x, y):
        i = next(c)
        return DataFrame({'x': x + random_walk(N - i),
                          'y': y + random_walk(N - i),
                          'frame': np.arange(i, N)})

    f = pandas_concat([walk(*pos) for pos in initial_positions])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate(
        [i*np.ones(N - i) for i in range(len(initial_positions))])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_traj_equal(actual, expected)
    actual_iter = self.link_df_iter(f, 5, hash_size=(200 + M, 200 + M))
    assert_traj_equal(actual_iter, expected)

def test_two_isolated_steppers(self):
    N = 5
    Y = 25
    # Begin second feature one frame later than the first, so the particle
    # labeling (0, 1) is established and not arbitrary.
    a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
                   'frame': np.arange(1, N)})
    f = pandas_concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_traj_equal(actual, expected)
    actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
    assert_traj_equal(actual_iter, expected)

    # Sort rows by frame (normal use)
    actual = self.link_df(pandas_sort(f, 'frame'), 5)
    assert_traj_equal(actual, expected)
    actual_iter = self.link_df_iter(pandas_sort(f, 'frame'), 5,
                                    hash_size=(50, 50))
    assert_traj_equal(actual_iter, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link_df(f1, 5)
    assert_traj_equal(actual, expected)
    actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
    assert_traj_equal(actual_iter, expected)

def test_nearby_continuous_random_walks(self):
    # Two 2D random walks
    np.random.seed(0)
    N = 30
    Y = 250
    M = 20  # margin, because negative values raise OutOfHash
    a = DataFrame({'x': M + random_walk(N),
                   'y': M + random_walk(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': M + random_walk(N - 1),
                   'y': M + Y + random_walk(N - 1),
                   'frame': np.arange(1, N)})
    f = pandas_concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_traj_equal(actual, expected)
    actual = self.link_df_iter(f, 5, hash_size=(2 * M, 2 * M + Y))
    assert_traj_equal(actual, expected)

    # Several 2D random walks
    np.random.seed(0)
    initial_positions = [(10, 11), (10, 18), (14, 15), (20, 21),
                         (13, 13), (10, 10), (17, 19)]
    import itertools
    c = itertools.count()

    def walk(x, y):
        i = next(c)
        return DataFrame({'x': x + random_walk(N - i),
                          'y': y + random_walk(N - i),
                          'frame': np.arange(i, N)})

    f = pandas_concat([walk(*pos) for pos in initial_positions])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate(
        [i * np.ones(N - i) for i in range(len(initial_positions))])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_traj_equal(actual, expected)
    actual = self.link_df_iter(f, 5, hash_size=(2 * M, 2 * M))
    assert_traj_equal(actual, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link_df(f1, 5)
    assert_traj_equal(actual, expected)
    actual = self.link_df_iter(f1, 5, hash_size=(2 * M, 2 * M))
    assert_traj_equal(actual, expected)

def compare(shape, count, radius, noise_level, engine):
    radius = tp.utils.validate_tuple(radius, len(shape))
    # tp.locate ignores a margin of size radius; take 1 px more to be safe
    margin = tuple([r + 1 for r in radius])
    diameter = tuple([(r * 2) + 1 for r in radius])
    draw_range = tuple([d * 3 for d in diameter])
    cols = ['x', 'y', 'z'][:len(shape)][::-1]
    pos = gen_nonoverlapping_locations(shape, count, draw_range, margin)
    image = draw_spots(shape, pos, draw_range, noise_level)
    f = tp.locate(image, diameter, engine=engine)
    actual = pandas_sort(f[cols], cols)
    expected = pandas_sort(DataFrame(pos, columns=cols), cols)
    return actual, expected

def test_two_nearby_steppers(self):
    N = 5
    Y = 2
    # Begin second feature one frame later than the first, so the particle
    # labeling (0, 1) is established and not arbitrary.
    a = DataFrame({'x': np.arange(N),
                   'y': np.ones(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': np.arange(1, N),
                   'y': Y + np.ones(N - 1),
                   'frame': np.arange(1, N)})
    f = pandas_concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_traj_equal(actual, expected)
    actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
    assert_traj_equal(actual_iter, expected)

    # Sort rows by frame (normal use)
    actual = self.link_df(pandas_sort(f, 'frame'), 5)
    assert_traj_equal(actual, expected)
    actual_iter = self.link_df_iter(pandas_sort(f, 'frame'), 5,
                                    hash_size=(50, 50))
    assert_traj_equal(actual_iter, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link_df(f1, 5)
    assert_traj_equal(actual, expected)
    actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
    assert_traj_equal(actual_iter, expected)

    if self.do_diagnostics:
        assert 'diag_subnet' in self.diag.columns
        assert 'diag_subnet_size' in self.diag.columns
        # Except for the frame in which they appear, all particles should
        # have been labeled with a search_range
        assert not any(
            self.diag['diag_search_range'][actual_iter.frame > 1].isnull())
        # The number of loop iterations is reported by the numba linker only
        if self.linker_opts['link_strategy'] == 'numba':
            assert 'diag_subnet_iterations' in self.diag.columns

def test_penalty(self):
    """A test case of two particles, spaced 8 apart, each moving 8 down and
    7 to the right. There are two likely linking results:

    1. two links, total squared displacement = 2*(8**2 + 7**2) = 226
    2. one link, total squared displacement = (8**2 + 1**2) + sr**2

    Case 2 pays a penalty for the unmade link, equal to the square of the
    search range (sr), which we vary in this test. With sr = 13, case 2 has
    a total cost of 234, so we expect case 1 as the result. With sr = 12,
    case 2 has a total cost of 209, so we expect case 2 as the result.
    """
    f = pd.DataFrame({'x': [0, 8, 7, 8 + 7],
                      'y': [0, 0, 8, 8],
                      'frame': [0, 0, 1, 1]})
    case1 = f.copy()
    case1['particle'] = np.array([0, 1, 0, 1])
    case2 = f.copy()
    case2['particle'] = np.array([0, 1, 1, 2])

    actual = self.link_df(f, 13)
    pandas_sort(case1, ['x'], inplace=True)
    pandas_sort(actual, ['x'], inplace=True)
    assert_array_equal(actual['particle'].values.astype(int),
                       case1['particle'].values.astype(int))

    actual = self.link_df(f, 12)
    pandas_sort(case2, ['x'], inplace=True)
    pandas_sort(actual, ['x'], inplace=True)
    assert_array_equal(actual['particle'].values.astype(int),
                       case2['particle'].values.astype(int))

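# Hedged sketch (not part of the original suite): a standalone restatement of
# the cost arithmetic quoted in the test_penalty docstring above. The helper
# name _penalty_arithmetic and the explicit formulas are illustrative; the
# real linker computes these costs internally.
def _penalty_arithmetic(sr):
    case1 = 2 * (8 ** 2 + 7 ** 2)         # both particles linked: 226
    case2 = (8 ** 2 + 1 ** 2) + sr ** 2   # one link plus the unlinked penalty
    return case1, case2

# _penalty_arithmetic(13) -> (226, 234): case 1 is cheaper, so two links win.
# _penalty_arithmetic(12) -> (226, 209): case 2 is cheaper, so one link wins.
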
def test_nearby_continuous_random_walks(self):
    # Two 2D random walks
    np.random.seed(0)
    N = 30
    Y = 250
    M = 20  # margin, because negative values raise OutOfHash
    a = DataFrame({'x': M + random_walk(N), 'y': M + random_walk(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': M + random_walk(N - 1), 'y': M + Y + random_walk(N - 1),
                   'frame': np.arange(1, N)})
    f = pd.concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_frame_equal(actual, expected)
    actual = self.link_df_iter(f, 5, hash_size=(2*M, 2*M + Y))
    assert_frame_equal(actual, expected)

    # Several 2D random walks
    np.random.seed(0)
    initial_positions = [(10, 11), (10, 18), (14, 15), (20, 21),
                         (13, 13), (10, 10), (17, 19)]
    import itertools
    c = itertools.count()

    def walk(x, y):
        i = next(c)
        return DataFrame({'x': x + random_walk(N - i),
                          'y': y + random_walk(N - i),
                          'frame': np.arange(i, N)})

    f = pd.concat([walk(*pos) for pos in initial_positions])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate(
        [i*np.ones(N - i) for i in range(len(initial_positions))])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_frame_equal(actual, expected)
    actual = self.link_df_iter(f, 5, hash_size=(2*M, 2*M))
    assert_frame_equal(actual, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link_df(f1, 5)
    assert_frame_equal(actual, expected)
    actual = self.link_df_iter(f1, 5, hash_size=(2*M, 2*M))
    assert_frame_equal(actual, expected)

def link(self, f, search_range, *args, **kwargs):
    if 'pos_columns' in kwargs:
        raise nose.SkipTest('Skipping find_link tests with custom pos_columns.')
    # the minimal spacing between features in f is assumed to be 1.
    # from scipy.spatial import cKDTree
    # mindist = 1e7
    # for _, _f in f.groupby('frame'):
    #     dists, _ = cKDTree(_f[['y', 'x']].values).query(_f[['y', 'x']].values, k=2)
    #     mindist = min(mindist, dists[:, 1].min())
    # print("Minimal dist is {0:.3f}".format(mindist))
    kwargs = dict(self.linker_opts, **kwargs)
    size = 3
    separation = kwargs['separation']
    f = f.copy()
    f[['y', 'x']] *= separation
    topleft = (f[['y', 'x']].min().values - 4 * separation).astype(int)
    f[['y', 'x']] -= topleft
    shape = (f[['y', 'x']].max().values + 4 * separation).astype(int)
    reader = CoordinateReader(f, shape, size)
    if kwargs.get('adaptive_stop', None) is not None:
        kwargs['adaptive_stop'] *= separation
    result = find_link(reader, search_range=search_range*separation,
                       *args, **kwargs)
    result = pandas_sort(result, ['particle', 'frame']).reset_index(drop=True)
    result[['y', 'x']] += topleft
    result[['y', 'x']] /= separation
    return result

def link(self, f, search_range, *args, **kwargs):
    # the minimal spacing between features in f is assumed to be 1.
    # from scipy.spatial import cKDTree
    # mindist = 1e7
    # for _, _f in f.groupby('frame'):
    #     dists, _ = cKDTree(_f[['y', 'x']].values).query(_f[['y', 'x']].values, k=2)
    #     mindist = min(mindist, dists[:, 1].min())
    # print("Minimal dist is {0:.3f}".format(mindist))
    kwargs = dict(self.linker_opts, **kwargs)
    size = 3
    separation = kwargs['separation']
    f = f.copy()
    f[['y', 'x']] *= separation
    topleft = (f[['y', 'x']].min().values - 4 * separation).astype(int)
    f[['y', 'x']] -= topleft
    shape = (f[['y', 'x']].max().values + 4 * separation).astype(int)
    reader = CoordinateReader(f, shape, size)
    if kwargs.get('adaptive_stop', None) is not None:
        kwargs['adaptive_stop'] *= separation
    result = find_link(reader, search_range=search_range * separation,
                       *args, **kwargs)
    result = pandas_sort(result, ['particle', 'frame']).reset_index(drop=True)
    result[['y', 'x']] += topleft
    result[['y', 'x']] /= separation
    return result

def link_df_iter(self, *args, **kwargs):
    kwargs.update(self.linker_opts)
    kwargs['diagnostics'] = self.do_diagnostics
    args = list(args)
    features = args.pop(0)
    res = pd.concat(tp.link_df_iter(
        (df for fr, df in features.groupby('frame')), *args, **kwargs))
    return pandas_sort(res, ['particle', 'frame']).reset_index(drop=True)

def link_df_iter(self, *args, **kwargs):
    kwargs.update(self.linker_opts)
    kwargs['diagnostics'] = self.do_diagnostics
    args = list(args)
    features = args.pop(0)
    res = pandas_concat(link_df_iter(
        (df for fr, df in features.groupby('frame')), *args, **kwargs))
    return pandas_sort(res, ['particle', 'frame']).reset_index(drop=True)

def conformity(df):
    """Organize toy data to look like real data. Be strict about dtypes:
    particle is a float and frame is an integer."""
    df['frame'] = df['frame'].astype(int)
    df['particle'] = df['particle'].astype(float)
    df['x'] = df['x'].astype(float)
    df['y'] = df['y'].astype(float)
    df.set_index('frame', drop=False, inplace=True)
    return pandas_sort(df, by=['frame', 'particle'])

def setUp(self):
    N = 10
    Y = 1
    a = DataFrame({'x': np.zeros(N),
                   'y': np.zeros(N),
                   'frame': np.arange(N),
                   'particle': np.zeros(N)})
    b = DataFrame({'x': np.zeros(N - 1),
                   'y': Y + np.zeros(N - 1),
                   'frame': np.arange(1, N),
                   'particle': np.ones(N - 1)})
    self.dead_still = conformity(pd.concat([a, b]))
    pandas_sort(self.dead_still, ['frame', 'particle'], inplace=True)

    P = 1000  # particles
    A = 0.00001  # step amplitude
    np.random.seed(0)
    particles = [DataFrame({'x': A * random_walk(N),
                            'y': A * random_walk(N),
                            'frame': np.arange(N),
                            'particle': i}) for i in range(P)]
    self.many_walks = conformity(pd.concat(particles))

    a = DataFrame({'x': np.arange(N),
                   'y': np.zeros(N),
                   'frame': np.arange(N),
                   'particle': np.zeros(N)})
    b = DataFrame({'x': np.arange(1, N),
                   'y': Y + np.zeros(N - 1),
                   'frame': np.arange(1, N),
                   'particle': np.ones(N - 1)})
    self.steppers = conformity(pd.concat([a, b]))

def link(self, f, search_range, *args, **kwargs): def df_iter(f, first_frame, last_frame): """ link_df_iter requires a generator of dataframes """ for t in range(first_frame, last_frame + 1): yield f[f['frame'] == t] res_iter = link_df_iter(df_iter(f, 0, int(f['frame'].max())), search_range, *args, **kwargs) res = pandas_concat(res_iter) return pandas_sort(res, ['particle', 'frame']).reset_index(drop=True)
def link(self, f, search_range, *args, **kwargs): def df_iter(f, first_frame, last_frame): """ link_df_iter requires a generator of dataframes """ for t in range(first_frame, last_frame + 1): yield f[f['frame'] == t] res_iter = link_df_iter(df_iter(f, 0, int(f['frame'].max())), search_range, *args, **kwargs) res = pd.concat(res_iter) return pandas_sort(res, ['particle', 'frame']).reset_index(drop=True)
def test_two_nearby_steppers(self):
    N = 5
    Y = 2
    # Begin second feature one frame later than the first, so the particle
    # labeling (0, 1) is established and not arbitrary.
    a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                   'frame': np.arange(N)})
    b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
                   'frame': np.arange(1, N)})
    f = pd.concat([a, b])
    expected = f.copy().reset_index(drop=True)
    expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
    pandas_sort(expected, ['particle', 'frame'], inplace=True)
    actual = self.link_df(f, 5)
    assert_frame_equal(actual, expected)
    actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
    assert_frame_equal(actual_iter, expected)

    # Sort rows by frame (normal use)
    actual = self.link_df(pandas_sort(f, 'frame'), 5)
    assert_frame_equal(actual, expected)
    actual_iter = self.link_df_iter(pandas_sort(f, 'frame'), 5,
                                    hash_size=(50, 50))
    assert_frame_equal(actual_iter, expected)

    # Shuffle rows (crazy!)
    np.random.seed(0)
    f1 = f.reset_index(drop=True)
    f1 = f1.reindex(np.random.permutation(f1.index))
    actual = self.link_df(f1, 5)
    assert_frame_equal(actual, expected)
    actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
    assert_frame_equal(actual_iter, expected)

    if self.do_diagnostics:
        assert 'diag_subnet' in self.diag.columns
        assert 'diag_subnet_size' in self.diag.columns
        # Except for the frame in which they appear, all particles should
        # have been labeled with a search_range
        assert not any(
            self.diag['diag_search_range'][actual_iter.frame > 1].isnull())
        # The number of loop iterations is reported by the numba linker only
        if self.linker_opts['link_strategy'] == 'numba':
            assert 'diag_subnet_iterations' in self.diag.columns

def setUp(self):
    N = 10
    Y = 1
    a = DataFrame({'x': np.zeros(N), 'y': np.zeros(N),
                   'frame': np.arange(N), 'particle': np.zeros(N)})
    b = DataFrame({'x': np.zeros(N - 1), 'y': Y + np.zeros(N - 1),
                   'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
    self.dead_still = conformity(pd.concat([a, b]))
    pandas_sort(self.dead_still, ['frame', 'particle'], inplace=True)

    P = 1000  # particles
    A = 0.00001  # step amplitude
    np.random.seed(0)
    particles = [DataFrame({'x': A*random_walk(N), 'y': A*random_walk(N),
                            'frame': np.arange(N), 'particle': i})
                 for i in range(P)]
    self.many_walks = conformity(pd.concat(particles))

    a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
                   'frame': np.arange(N), 'particle': np.zeros(N)})
    b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
                   'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
    self.steppers = conformity(pd.concat([a, b]))

def link(self, f, search_range, *args, **kwargs):
    def f_iter(f, first_frame, last_frame):
        """link_iter requires an (optionally enumerated) generator of
        ndarrays"""
        for t in np.arange(first_frame, last_frame + 1,
                           dtype=f['frame'].dtype):
            f_filt = f[f['frame'] == t]
            yield t, f_filt[['y', 'x']].values

    res = f.copy()
    res['particle'] = -1
    for t, ids in link_simple_iter(f_iter(f, 0, int(f['frame'].max())),
                                   search_range, *args, **kwargs):
        res.loc[res['frame'] == t, 'particle'] = ids
    return pandas_sort(res, ['particle', 'frame']).reset_index(drop=True)

def link(self, f, search_range, *args, **kwargs):
    pos_columns = kwargs.pop('pos_columns', ['y', 'x'])

    def f_iter(f, first_frame, last_frame):
        """link_iter requires an (optionally enumerated) generator of
        ndarrays"""
        for t in np.arange(first_frame, last_frame + 1,
                           dtype=f['frame'].dtype):
            f_filt = f[f['frame'] == t]
            yield t, f_filt[pos_columns].values

    res = f.copy()
    res['particle'] = -1
    for t, ids in link_iter(f_iter(f, 0, int(f['frame'].max())),
                            search_range, *args, **kwargs):
        res.loc[res['frame'] == t, 'particle'] = ids
    return pandas_sort(res, ['particle', 'frame']).reset_index(drop=True)

def link_df(self, f, search_range, *args, **kwargs):
    kwargs = dict(self.linker_opts, **kwargs)
    size = 3
    separation = kwargs['separation']
    f = f.copy()
    f[['y', 'x']] *= separation
    topleft = (f[['y', 'x']].min().values - 4 * separation).astype(int)
    f[['y', 'x']] -= topleft
    shape = (f[['y', 'x']].max().values + 4 * separation).astype(int)
    reader = CoordinateReader(f, shape, size)
    result = find_link(reader, search_range=search_range*separation,
                       *args, **kwargs)
    result = pandas_sort(result, ['particle', 'frame']).reset_index(drop=True)
    result[['y', 'x']] += topleft
    result[['y', 'x']] /= separation
    return result

def test_minmass_maxsize(self):
    # Test the mass- and size-based filtering here on 4 different features.
    self.check_skip()
    L = 64
    dims = (L, L + 2)
    cols = ['y', 'x']
    PRECISION = 1  # we are not testing for subpx precision here
    image = np.zeros(dims, dtype=np.uint8)
    pos1 = np.array([15, 20])
    pos2 = np.array([40, 40])
    pos3 = np.array([25, 50])
    pos4 = np.array([35, 15])
    draw_feature(image, pos1, size=2.5)
    draw_feature(image, pos2, size=5)
    draw_feature(image, pos3, size=0.8)
    draw_feature(image, pos4, size=3.2)

    # filter on mass
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       minmass=6500, separation=10)[cols]
    actual = pandas_sort(actual, cols)
    expected = pandas_sort(DataFrame([pos2, pos4], columns=cols), cols)
    assert_allclose(actual, expected, atol=PRECISION)

    # filter on size
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       maxsize=3.0, separation=10)[cols]
    actual = pandas_sort(actual, cols)
    expected = pandas_sort(DataFrame([pos1, pos3], columns=cols), cols)
    assert_allclose(actual, expected, atol=PRECISION)

    # filter on both mass and size
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       minmass=600, maxsize=4.0, separation=10)[cols]
    actual = pandas_sort(actual, cols)
    expected = pandas_sort(DataFrame([pos1, pos4], columns=cols), cols)
    assert_allclose(actual, expected, atol=PRECISION)