def Detect(estimateFeatureSize, CameraName, minMass=None, dynamicMinMass=False):
    ImagePath = 'E:/BubbleRisingUltimate/4mmGasBubbleRising/4mmGasBubbleRising'
    Path = os.path.join(ImagePath, CameraName)
    frames = pims.open(Path)
    print('Valid frames length is %d' % len(frames))
    # find five brightest
    if not minMass:
        f = tp.locate(frames[testFrame], estimateFeatureSize)
        mass = list(f['mass'])
        mass.sort()
        minMass = int(mass[-3] * 0.9 + mass[-1] * 0.1)
        print(minMass)
        #TopTen = np.argsort(f['mass'])[-5:]
        #TopTenArray = f['mass'][TopTen]
        # show mass histogram
        # show subpixel accuracy of the detection
        #minMass = list(TopTenArray)[0]
    f = tp.locate(frames[testFrame], estimateFeatureSize, minmass=minMass)
    plt.figure()
    tp.annotate(f, frames[testFrame])
    # run batch processing for all frames
    if dynamicMinMass:
        f = f[0:0]
        for i in range(len(frames)):
            f_ele = tp.locate(frames[i], estimateFeatureSize)
            mass = list(f_ele['mass'])
            mass.sort()
            minMass = int(mass[-2] * 0.9 + mass[-1] * 0.1)
            f_ele = tp.locate(frames[i], estimateFeatureSize, minmass=minMass)
            f = f.append(f_ele)
    else:
        f = tp.batch(frames, estimateFeatureSize, minmass=minMass)
    return f, frames
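# Hedged sketch, not part of the original code: the adaptive-minmass heuristic used in
# Detect above, factored into a standalone helper. It assumes trackpy is importable as
# tp (as in the rest of these snippets); the 0.9/0.1 blend of the two brightest masses
# mirrors the dynamicMinMass branch.
def estimate_minmass(frame, feature_size, weight=0.9):
    f = tp.locate(frame, feature_size)  # first pass with no mass threshold
    mass = sorted(f['mass'])
    # Blend the second-brightest and brightest masses to set the cutoff.
    return int(mass[-2] * weight + mass[-1] * (1 - weight))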
def test_flat_peak(self):
    # This tests the part of locate_maxima that eliminates multiple
    # maxima in the same mask area.
    self.check_skip()
    image = np.ones((21, 23)).astype(np.uint8)
    image[11, 13] = 100
    image[11, 14] = 100
    image[12, 13] = 100
    count = len(tp.locate(image, 5, preprocess=False))
    self.assertEqual(count, 1)

    # 2 x 2 block of equal maxima
    image = np.ones((21, 23)).astype(np.uint8)
    image[11, 13] = 100
    image[11, 14] = 100
    image[12, 13] = 100
    image[12, 14] = 100
    count = len(tp.locate(image, 5, preprocess=False))
    self.assertEqual(count, 1)

    image = np.ones((21, 23)).astype(np.uint8)
    image[11, 13] = 100
    image[11, 14] = 100
    image[11, 15] = 100
    count = len(tp.locate(image, 5, preprocess=False))
    self.assertEqual(count, 1)
def test_eccentricity(self):
    # Eccentricity (elongation) is measured with good accuracy and
    # ~0.02 precision, as long as the mask is large enough to cover
    # the whole object.
    self.check_skip()
    L = 501
    dims = (L + 2, L)  # avoid square images in tests
    pos = [50, 55]
    cols = ['y', 'x']

    ECC = 0
    image = np.ones(dims, dtype='uint8')
    draw_feature(image, pos, 27, ecc=ECC)
    actual = tp.locate(image, 21, 1, preprocess=False, engine=self.engine)['ecc']
    expected = ECC
    assert_allclose(actual, expected, atol=0.02)

    ECC = 0.2
    image = np.ones(dims, dtype='uint8')
    draw_feature(image, pos, 27, ecc=ECC)
    actual = tp.locate(image, 21, 1, preprocess=False, engine=self.engine)['ecc']
    expected = ECC
    assert_allclose(actual, expected, atol=0.1)

    ECC = 0.5
    image = np.ones(dims, dtype='uint8')
    draw_feature(image, pos, 27, ecc=ECC)
    actual = tp.locate(image, 21, 1, preprocess=False, engine=self.engine)['ecc']
    expected = ECC
    assert_allclose(actual, expected, atol=0.1)
def test_topn(self):
    self.check_skip()
    L = 21
    dims = (L, L + 2)  # avoid square images in tests
    cols = ['y', 'x']
    PRECISION = 0.1

    # top 2
    pos1 = np.array([7, 7])
    pos2 = np.array([14, 14])
    pos3 = np.array([7, 14])
    image = np.ones(dims, dtype='uint8')
    draw_point(image, pos1, 100)
    draw_point(image, pos2, 90)
    draw_point(image, pos3, 80)
    actual = tp.locate(image, 5, 1, topn=2, preprocess=False, engine=self.engine)[cols]
    actual = actual.sort(['x', 'y'])  # sort for reliable comparison
    expected = DataFrame([pos1, pos2], columns=cols).sort(['x', 'y'])
    assert_allclose(actual, expected, atol=PRECISION)

    # top 1
    actual = tp.locate(image, 5, 1, topn=1, preprocess=False, engine=self.engine)[cols]
    actual = actual.sort(['x', 'y'])  # sort for reliable comparison
    expected = DataFrame([pos1], columns=cols).sort(['x', 'y'])
    assert_allclose(actual, expected, atol=PRECISION)
def test_topn(self):
    self.check_skip()
    L = 21
    dims = (L, L + 2)  # avoid square images in tests
    cols = ['x', 'y']
    PRECISION = 0.1

    # top 2
    pos1 = np.array([7, 7])
    pos2 = np.array([14, 14])
    pos3 = np.array([7, 14])
    image = np.ones(dims, dtype='uint8')
    draw_point(image, pos1, 100)
    draw_point(image, pos2, 90)
    draw_point(image, pos3, 80)
    actual = tp.locate(image, 5, 1, topn=2, preprocess=False, engine=self.engine)[cols]
    actual = actual.sort(['x', 'y'])  # sort for reliable comparison
    expected = DataFrame([pos1, pos2], columns=cols).sort(['x', 'y'])
    assert_allclose(actual, expected, atol=PRECISION)

    # top 1
    actual = tp.locate(image, 5, 1, topn=1, preprocess=False, engine=self.engine)[cols]
    actual = actual.sort(['x', 'y'])  # sort for reliable comparison
    expected = DataFrame([pos1], columns=cols).sort(['x', 'y'])
    assert_allclose(actual, expected, atol=PRECISION)
def test_eccentricity(self):
    # Eccentricity (elongation) is measured with good accuracy and
    # ~0.02 precision, as long as the mask is large enough to cover
    # the whole object.
    self.check_skip()
    L = 501
    dims = (L + 2, L)  # avoid square images in tests
    pos = [50, 55]
    cols = ['x', 'y']

    ECC = 0
    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos, 4, ecc=ECC)
    actual = tp.locate(image, 21, 1, preprocess=False, engine=self.engine)['ecc']
    expected = ECC
    assert_allclose(actual, expected, atol=0.02)

    ECC = 0.2
    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos, 4, ecc=ECC)
    actual = tp.locate(image, 21, 1, preprocess=False, engine=self.engine)['ecc']
    expected = ECC
    assert_allclose(actual, expected, atol=0.1)

    ECC = 0.5
    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos, 4, ecc=ECC)
    actual = tp.locate(image, 21, 1, preprocess=False, engine=self.engine)['ecc']
    expected = ECC
    assert_allclose(actual, expected, atol=0.1)
def test_nocharacterize_a(self):
    pos = gen_nonoverlapping_locations((200, 300), 9, 5)
    image = draw_spots((200, 300), pos, (0.5, 1))
    f_c = tp.locate(image, (3, 5), engine=self.engine, characterize=True)
    f_nc = tp.locate(image, (3, 5), engine=self.engine, characterize=False)
    assert len(f_nc.columns) == 3
    assert_allclose(f_c.values[:, :3], f_nc.values)
def test_nocharacterize(self):
    pos = gen_nonoverlapping_locations((200, 300), 9, 5)
    image = draw_spots((200, 300), pos, 5)
    f_c = tp.locate(image, 5, engine=self.engine, characterize=True)
    f_nc = tp.locate(image, 5, engine=self.engine, characterize=False)
    assert len(f_nc.columns) == 3
    assert_allclose(f_c.values[:, :3], f_nc.values)
def test_flat_peak(self):
    # This tests the part of locate_maxima that eliminates multiple
    # maxima in the same mask area.
    self.check_skip()
    image = np.ones((21, 23)).astype(np.uint8)
    image[11, 13] = 100
    image[11, 14] = 100
    image[12, 13] = 100
    count = len(tp.locate(image, 5, preprocess=False, engine=self.engine))
    self.assertEqual(count, 1)

    image = np.ones((21, 23)).astype(np.uint8)
    image[11:13, 13:15] = 100
    count = len(tp.locate(image, 5, preprocess=False, engine=self.engine))
    self.assertEqual(count, 1)

    image = np.ones((21, 23)).astype(np.uint8)
    image[11, 13] = 100
    image[11, 14] = 100
    image[11, 15] = 100
    count = len(tp.locate(image, 5, preprocess=False, engine=self.engine))
    self.assertEqual(count, 1)

    # This tests that two nearby peaks are merged by
    # picking the one with the brighter neighborhood.
    image = np.ones((21, 23)).astype(np.uint8)
    pos = [14, 14]
    draw_point(image, [11, 13], 100)
    draw_point(image, [11, 14], 100)
    draw_point(image, [11, 15], 100)
    draw_point(image, [14, 13], 101)
    draw_point(image, [14, 14], 101)
    draw_point(image, [14, 15], 101)
    cols = ['y', 'x']
    actual = tp.locate(image, 5, preprocess=False, engine=self.engine)[cols]
    expected = DataFrame(np.asarray(pos).reshape(1, -1), columns=cols)
    assert_allclose(actual, expected, atol=0.1)

    # Break ties by sorting by position, simply to avoid
    # any randomness resulting from cKDTree returning a set.
    image = np.ones((21, 23)).astype(np.uint8)
    pos = [14, 14]
    draw_point(image, [11, 12], 100)
    draw_point(image, [11, 13], 100)
    draw_point(image, [11, 14], 100)
    draw_point(image, [14, 13], 100)
    draw_point(image, [14, 14], 100)
    draw_point(image, [14, 15], 100)
    cols = ['y', 'x']
    actual = tp.locate(image, 5, preprocess=False, engine=self.engine)[cols]
    expected = DataFrame(np.asarray(pos).reshape(1, -1), columns=cols)
    assert_allclose(actual, expected, atol=0.1)
def setup(self):
    shape = (100, 101)
    r = 3
    noise_level = 0.01
    cases = {"sparse": 5, "dense": 100}
    for case_name, count in cases.items():
        locations = gen_random_locations(shape, count)
        image = draw_spots(shape, locations, r, noise_level)
        setattr(self, "{0}_image".format(case_name), image)
    # Prime FFTW (if it's there).
    tp.locate(self.sparse_image, 7)
def test_warn_color_image(self):
    self.check_skip()

    # RGB-like
    image = np.random.randint(0, 100, (21, 23, 3)).astype(np.uint8)
    with assert_produces_warning(UserWarning):
        tp.locate(image, 5)

    # RGBA-like
    image = np.random.randint(0, 100, (21, 23, 4)).astype(np.uint8)
    with assert_produces_warning(UserWarning):
        tp.locate(image, 5)
def test_minmass_maxsize(self):
    # Test the mass- and size-based filtering here on 4 different features.
    self.check_skip()
    L = 64
    dims = (L, L + 2)
    cols = ['y', 'x']
    PRECISION = 1  # we are not testing for subpx precision here

    image = np.zeros(dims, dtype=np.uint8)
    pos1 = np.array([15, 20])
    pos2 = np.array([40, 40])
    pos3 = np.array([25, 50])
    pos4 = np.array([35, 15])
    draw_feature(image, pos1, size=2.5)
    draw_feature(image, pos2, size=5)
    draw_feature(image, pos3, size=0.8)
    draw_feature(image, pos4, size=3.2)

    # filter on mass
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       minmass=6500, separation=10)[cols]
    actual = pandas_sort(actual, cols)
    expected = pandas_sort(DataFrame([pos2, pos4], columns=cols), cols)
    assert_allclose(actual, expected, atol=PRECISION)

    # filter on size
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       maxsize=3.0, separation=10)[cols]
    actual = pandas_sort(actual, cols)
    expected = pandas_sort(DataFrame([pos1, pos3], columns=cols), cols)
    assert_allclose(actual, expected, atol=PRECISION)

    # filter on both mass and size
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       minmass=600, maxsize=4.0, separation=10)[cols]
    actual = pandas_sort(actual, cols)
    expected = pandas_sort(DataFrame([pos1, pos4], columns=cols), cols)
    assert_allclose(actual, expected, atol=PRECISION)
def trackpy(state, magic=165):
    image = totiff(state)
    diameter = int(2 * state.state[state.b_rad].mean())
    diameter -= 1 - diameter % 2  # force an odd diameter, as trackpy requires
    # scale the mass threshold with the feature volume unless magic is disabled
    minmass = 100. if magic is None else magic * (diameter / 2)**3
    out = locate(image, diameter=diameter, invert=True, minmass=minmass)
    return np.vstack([out.z, out.y, out.x]).T
def trackpy(frame, _, parameters=None, call_num=None):
    method_key = get_method_key('trackpy', call_num)
    df = tp.locate(frame,
                   get_param_val(parameters[method_key]['size_estimate']),
                   invert=get_param_val(parameters[method_key]['invert']))

    if parameters[method_key]['get_intensities']:
        x = df['x'].to_numpy()
        y = df['y'].to_numpy()
        intensity = []
        for i in range(np.size(x)):
            xc = x[i]
            yc = y[i]
            rc = get_param_val(parameters[method_key]['intensity_radius'])
            try:
                # Try because some circles overlap the edge giving meaningless answers
                cut_out_frame = frame[int(yc - rc):int(yc + rc),
                                      int(xc - rc):int(xc + rc)]
                h, w = cut_out_frame.shape[:2]
                mask = create_circular_mask(h, w)
                masked_img = cut_out_frame.copy()
                masked_img[~mask] = 0
                value = getattr(im, parameters[method_key]['get_intensities'])(masked_img)
            except Exception:
                value = np.nan
            intensity.append(value)
        df['intensities'] = np.array(intensity)

    return df
def call_spots(self):
    """ Find Spots in Images """
    """ ZScore Image """
    zscore = self.img.copy()
    zscore = zscore - np.percentile(zscore.ravel(), 25)
    zscore = zscore / np.percentile(zscore.ravel(), 75)
    zscore[zscore < 0] = 0

    """ Detect Spots """
    # self.parameters['spot_diameter'] = 3
    # self.parameters['spot_minmass'] = 2
    # self.parameters['spot_separation'] = 1
    self.spots = tp.locate(zscore,
                           self.parameters['spot_diameter'],
                           minmass=self.parameters['spot_minmass'],
                           separation=self.parameters['spot_separation'])

    """ UPDATE CHECK IF THERE ARE NO SPOTS AND ALERT USER """
    if len(self.spots) == 0:
        self.utilities.save_data('No Spots Detected',
                                 Dataset=self.dataset,
                                 Position=self.posname,
                                 Hybe=self.hybe,
                                 Channel=self.channel,
                                 Zindex=self.zindex,
                                 Type='log')
def compare(shape, count, radius, noise_level, engine):
    pos = gen_random_locations(shape, count)
    image = draw_spots(shape, pos, radius, noise_level)
    f = tp.locate(image, 2 * radius + 1, minmass=1800, engine=engine)
    actual = f[['x', 'y']].sort(['x', 'y'])
    expected = DataFrame(pos, columns=['x', 'y']).sort(['x', 'y'])
    return actual, expected
def bckgrd_pdetect(self, thld=100, progress_barF=None, Print=True, **kwargs):
    '''Such as: P_dfs = bckgrd_pdetect(thld=125, diameter=(5,5), minmass=300, invert=True)

    thld          : threshold; a TRUE particle must be darker than the background image by this amount
    progress_barF : a handle for an external status bar (such as in a GUI)
    Print         : print the frame number that is currently being processed
    **kwargs      : additional parameters passed to the trackpy.locate() method

    Returns a pandas.DataFrame containing the information of all particles
    (both True and False ones) detected.
    '''
    particle_dfs = []
    for _idx, _img in enumerate(self.img_stack):
        if progress_barF is None:
            pass
        else:
            progress_barF(_idx, len(self.img_stack))
        _f_df = tp.locate(_img, **kwargs)
        if len(_f_df) > 1:
            _f_df['ST_dsty'] = [self._area_density(_f_df.ix[i, 'x'],
                                                   _f_df.ix[i, 'y'],
                                                   _f_df.ix[i, 'size'],
                                                   255 - self.crtd_image[_idx])
                                for i in range(len(_f_df))]
            _f_df['True_particle'] = (_f_df['ST_dsty'] > thld)
            _f_df['Timestamp'] = _idx * (1. / self.frame_rate)  # if frame rate is known, may convert to timestamp
            _f_df['frame'] = _idx  # must have it to make the tracker work
            particle_dfs.append(_f_df)
        if Print:
            print_update('Analyzing frame %s' % _idx)
    self.particle_dfs = particle_dfs
    return particle_dfs
def locate_feature(image_path, particle_size, minmass):
    #image = plt.imread(image_path)
    # using cv2: https://stackoverflow.com/questions/18446804/python-read-and-write-tiff-16-bit-three-channel-colour-images
    image = cv.imread(image_path, -1)
    image = image[8:-1, :]  # crop out noise
    # rotate by 180 (fudge)
    #image = np.rot90(image, 2)
    image = np.fliplr(image)
    frame = pims.Frame(image)

    # plt.figure(0)
    # plt.imshow(image)

    f = tp.locate(frame, particle_size, minmass=minmass)

    # plt.figure(1)
    # tp.annotate(f, frame)
    # plt.show(block=False)
    #
    # fig, ax = plt.subplots()
    # ax.hist(f['mass'], bins=20)
    # plt.show(block=False)

    return f
def method():
    if self.Keys.locations not in self._pocket:
        # Calculate locations
        data_frames = []
        tic = time.time()
        for i in range(self.n_frames):
            self.show_text('Calculating {}/{} ...'.format(i + 1, self.n_frames))
            df = tp.locate(self.images[i], diameter, **kwargs)
            data_frames.append(df)
            console.print_progress(i + 1, self.n_frames, start_time=tic)
        self.put_into_pocket(self.Keys.locations, data_frames, exclusive=False)
        console.show_status('Locating completed. Configurations:')
        kwargs['diameter'] = diameter
        console.supplement(kwargs)

    # Clear status
    self.axes.cla()

    # Display
    dfs = self.get_from_pocket(self.Keys.locations)
    tp.annotate(dfs[self.cursor], self.raw_frames[self.cursor], ax=self.axes)

    # Set title
    title = ', '.join(['{} = {}'.format(k, v) for k, v in kwargs.items()])
    self.set_axes_style(title)
def find_centroids(img, **kwargs):
    """Locates the centroids of the features based on the specified library.
    In this case we are using Trackpy.

    .. warning:: We should deprecate the use of trackpy and make a custom solution
    """
    centroids = tp.locate(img, **kwargs)
    return centroids
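# Illustrative usage of find_centroids above (an assumption, not from the original
# source): a small synthetic frame built with numpy keeps the call self-contained,
# and the diameter/minmass values are arbitrary examples of trackpy.locate kwargs.
def _demo_find_centroids():
    import numpy as np
    rng = np.random.default_rng(0)
    img = (rng.random((128, 128)) * 20).astype(np.uint8)  # dim background noise
    img[60:65, 40:45] = 200  # one bright blob to detect
    return find_centroids(img, diameter=11, minmass=100)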
def test_all_maxima_filtered(self):
    self.check_skip()
    black_image = np.ones((21, 23)).astype(np.uint8)
    draw_point(black_image, [11, 13], 10)
    with assert_produces_warning(UserWarning):
        f = tp.locate(black_image, 5, minmass=200, engine=self.engine,
                      preprocess=False)
def compare(shape, count, radius, noise_level, engine):
    pos = gen_random_locations(shape, count)
    image = draw_spots(shape, pos, radius, noise_level)
    f = tp.locate(image, 2 * radius + 1, minmass=1800, engine=engine)
    actual = f[['x', 'y']].sort(['x', 'y'])
    expected = DataFrame(pos, columns=['x', 'y']).sort(['x', 'y'])
    return actual, expected
def _getNuclei(self):
    """
    Creates the dataframe containing all the cells of the Spheroid.

    The duplicate-cleaning function is eliminated. Indeed, the local maximum
    makes it impossible for any cell to be segmented twice along the z-axis.
    """
    df = trackpy.locate(self.NucImage[:, :, :], self.RNoyau,
                        minmass=self.MinMass, maxsize=None,
                        separation=self.RNoyau, noise_size=1,
                        smoothing_size=None, threshold=None, invert=False,
                        percentile=64, topn=self.CellNumber, preprocess=True,
                        max_iterations=10, filter_before=None,
                        filter_after=None, characterize=True, engine='numba')

    df = df.loc[df['mass'] > self.MinMass]
    df = df.loc[((df['x'] - df['x'].mean())**2 < 4 * df['x'].std()**2) &
                ((df['y'] - df['y'].mean())**2 < 4 * df['y'].std()**2)]
    df['label'] = range(len(df))

    self.NucFrame = df
def locate(image, **kwargs):
    return tp.locate(image, diameter, 1, preprocess=False,
                     engine=self.engine, **kwargs)[cols]
def detect(self, image):
    '''Localize features in normalized holographic microscopy images

    Parameters
    ----------
    image : array_like
        image data

    Returns
    -------
    predictions : list of dict
        one dict per feature, holding the center coordinates (x_p, y_p)
        and the bounding box ((x0, y0), w, h); returns (None, None) when
        no features are found
    '''
    a = self._circletransform(image)
    a /= np.max(a)

    features = tp.locate(a, **self._tp_opts)
    nfeatures = len(features)
    if nfeatures == 0:
        return None, None

    predictions = []
    for n, feature in features.iterrows():
        r_p = feature[['x', 'y']]
        extent = self._extent(image, r_p)
        r0 = tuple((r_p - extent / 2).astype(int))
        bbox = (r0, extent, extent)
        prediction = dict(x_p=r_p[0], y_p=r_p[1], bbox=bbox)
        predictions.append(prediction)
    return predictions
def locate_particles(self, *args):
    self.centroids = tp.locate(
        self.data[self.video_widget.imv.currentIndex, :, :], 9, minmass=250)
    x, y = self.centroids['x'].tolist(), self.centroids['y'].tolist()
    mass = self.centroids['mass']
    hist_y, hist_x = np.histogram(mass, bins=np.linspace(min(mass), max(mass), 40))
    self.analysis_widget.histogram.setData(hist_x, hist_y)
    imv_view = self.video_widget.imv.getView()
    for c in self.circles:
        imv_view.removeItem(c)
    for i in range(len(x)):
        self.circles.append(
            MyCircleOverlay(pos=(y[i] - 5, x[i] - 5), size=10, pen=pen,
                            movable=False))
        imv_view.addItem(self.circles[-1])
def quickFeaturePosition(frame, inspect=False, method=0):
    '''Testing cv2 quick feature detection'''
    notedFrame = None  # avoid a NameError when the ORB branch is selected
    if method == 0:
        ######## trackpy locate ########
        if isinstance(frame, str):
            frame = cv2.imread(str(frame), cv2.IMREAD_GRAYSCALE)
        f = tp.locate(frame, 11, percentile=99, invert=True)
        notedFrame = track.stealthAnnotate(f, frame)
        if inspect:
            cv2.imshow('inspect', notedFrame)
            cv2.waitKey(0)
    else:
        ############ CV2 ORB ############
        '''
        img = cv2.imread(str(frame), 0)
        # Initiate ORB detector
        orb = cv2.ORB_create()
        # find the keypoints with ORB
        kp = orb.detect(img, None)
        # compute the descriptors with ORB
        kp, des = orb.compute(img, kp)
        # draw only keypoints location, not size and orientation
        img2 = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0), flags=0)
        plt.imshow(img2), plt.show()
        '''
        pass
    return notedFrame
def locate(self, input=None, store=True, frame=0):
    # Retrieve the array
    if input is None:
        input = self.input
    array = _get_array(input, frame=frame)

    # Check odd numbers
    self.diameter = _nbr2odd(self.diameter)

    # Run TrackPy
    dataframe = tp.locate(
        array,
        self.diameter,
        minmass=self.minmass,
        maxsize=self.maxsize,
        separation=self.separation,
        noise_size=self.noise_size,
        smoothing_size=self.smoothing_size,
        threshold=self.threshold,
        invert=self.invert,
        percentile=self.percentile,
        topn=self.topn,
        preprocess=self.preprocess,
        max_iterations=self.max_iterations,
        characterize=self.characterize,
        engine=self.engine,
    )

    # Store in the instance
    if store:
        self.spots = deepcopy(dataframe)
        self.tracks = deepcopy(dataframe)

    return dataframe
def test_all_maxima_filtered(self):
    self.check_skip()
    black_image = np.ones((21, 23)).astype(np.uint8)
    draw_point(black_image, [11, 13], 10)
    with assert_produces_warning(UserWarning):
        f = tp.locate(black_image, 5, minmass=1000, engine=self.engine,
                      preprocess=False)
def test_mass(self):
    # The mass calculated from the processed image should be independent
    # of added noise. Its absolute value is untested.
    # The mass calculated from the raw image should equal
    # noiseless mass + noise_size/2 * Npx_in_mask.
    self.check_skip()
    ndim = 2
    radius = 6
    N = 20
    shape = (128, 127)

    # Calculate the expected mass from a single spot using the set masksize
    center = (radius * 2,) * ndim
    spot = draw_spots((radius * 4,) * ndim, [center], radius * 3, bitdepth=12)
    rect = [slice(c - radius, c + radius + 1) for c in center]
    mask = tp.masks.binary_mask(radius, 2)
    Npx = mask.sum()
    EXPECTED_MASS = (spot[rect] * mask).sum()

    # Generate feature locations and make the image
    expected = gen_nonoverlapping_locations(shape, N, radius * 3, radius + 2)
    expected = expected + np.random.random(expected.shape)
    N = expected.shape[0]
    image = draw_spots(shape, expected, radius * 3, bitdepth=12)

    # analyze the image without noise
    f = tp.locate(image, radius * 2 + 1, engine=self.engine, topn=N)
    PROCESSED_MASS = f['mass'].mean()
    assert_allclose(f['raw_mass'].mean(), EXPECTED_MASS, rtol=0.01)

    for n, noise in enumerate(np.arange(0.05, 0.8, 0.05)):
        noise_level = int((2**12 - 1) * noise)
        image_noisy = image + np.array(np.random.randint(0, noise_level, image.shape),
                                       dtype=image.dtype)
        f = tp.locate(image_noisy, radius * 2 + 1, engine=self.engine, topn=N)
        assert_allclose(f['mass'].mean(), PROCESSED_MASS, rtol=0.1)
        assert_allclose(f['raw_mass'].mean(),
                        EXPECTED_MASS + Npx * noise_level / 2, rtol=0.1)
def test():
    frames = gray(pims.open('data/png/crack_tip/*.png'))
    print(frames)
    print(frames[0][100, :])
    plt.imshow(frames[0])
    f = tp.locate(frames[0], diameter=15, invert=False, minmass=1)
    f.head()
    tp.annotate(f, frames[0])
def test_oldmass_16bit(self):
    old_minmass = 2800000
    im = draw_spots(self.shape, self.pos, self.size, bitdepth=16,
                    noise_level=10000)
    new_minmass = self.minmass_v02_to_v04(im, old_minmass)
    f = tp.locate(im, self.tp_diameter, minmass=new_minmass)
    assert len(f) == self.N
def testpart(frames, frameno, minthresh, ns, verbose):
    dtarget = 3
    ns = 1
    sep = dtarget  # smaller value leads to fewer points
    f = tp.locate(frames[frameno], noise_size=ns, separation=sep,
                  smoothing_size=dtarget + 1, diameter=dtarget,
                  minmass=minthresh, topn=200, invert=False)
    nopart = f.shape[0]
    return nopart
def Detect(estimateFeatureSize, CameraName, minMass=None, dynamicMinMass=False, Crop=False):
    ImagePath = os.path.join('data', CaseName.split('-')[0], CaseName.split('-')[1])
    Path = os.path.join(ImagePath, CameraName + '*.tif')
    frames = pims.open(Path)
    if Crop:
        frames = pims.process.crop(frames, ((400, 700), (0, 0)))
    print('Valid frames length is %d' % len(frames))

    # check start frame and end frame with total frames number
    if len(frames) != (endFrame - startFrame + 1):
        print('Invalid frames length')
        return

    # find five brightest
    if not minMass:
        f = tp.locate(frames[testFrame], estimateFeatureSize)
        mass = list(f['mass'])
        mass.sort()
        minMass = int(mass[-2] * 0.9 + mass[-1] * 0.1)
        print(minMass)
        #TopTen = np.argsort(f['mass'])[-5:]
        #TopTenArray = f['mass'][TopTen]
        # show mass histogram
        # show subpixel accuracy of the detection
        #minMass = list(TopTenArray)[0]

    f = tp.locate(frames[testFrame], estimateFeatureSize, minmass=minMass)
    plt.figure()
    tp.annotate(f, frames[testFrame])

    # run batch processing for all frames
    if dynamicMinMass:
        f = f[0:0]
        for i in range(len(frames)):
            f_ele = tp.locate(frames[i], estimateFeatureSize)
            mass = list(f_ele['mass'])
            mass.sort()
            minMass = int(mass[-2] * 0.9 + mass[-1] * 0.1)
            f_ele = tp.locate(frames[i], estimateFeatureSize, minmass=minMass)
            f = f.append(f_ele)
    else:
        f = tp.batch(frames, estimateFeatureSize, minmass=minMass)
    return f, frames
def test_rg(self):
    # For Gaussians with radii 2, 3, 5, and 7 px, with proportionately
    # chosen feature (mask) sizes, the 'size' comes out to be within 10%
    # of the true Gaussian width.
    # The IDL code has a mistake in this area, documented here:
    # http://www.physics.emory.edu/~weeks/idl/radius.html
    self.check_skip()
    L = 101
    dims = (L, L + 2)  # avoid square images in tests
    pos = [50, 55]
    cols = ['x', 'y']

    SIZE = 2
    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos, SIZE)
    actual = tp.locate(image, 7, 1, preprocess=False, engine=self.engine)['size']
    expected = SIZE
    assert_allclose(actual, expected, rtol=0.1)

    SIZE = 3
    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos, SIZE)
    actual = tp.locate(image, 11, 1, preprocess=False, engine=self.engine)['size']
    expected = SIZE
    assert_allclose(actual, expected, rtol=0.1)

    SIZE = 5
    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos, SIZE)
    actual = tp.locate(image, 17, 1, preprocess=False, engine=self.engine)['size']
    expected = SIZE
    assert_allclose(actual, expected, rtol=0.1)

    SIZE = 7
    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos, SIZE)
    actual = tp.locate(image, 23, 1, preprocess=False, engine=self.engine)['size']
    expected = SIZE
    assert_allclose(actual, expected, rtol=0.1)
def test_ep(self):
    # Test whether the estimated static error equals the rms deviation from
    # the expected values. Next to the feature mass, the static error is
    # calculated from the estimated image background level and variance.
    # This estimate is also tested here.

    # A threshold is necessary to identify the background array so that
    # background average and standard deviation can be estimated within 1%
    # accuracy.

    # The tolerance for ep in this test is 0.001 px or 20%.
    # This amounts to roughly the following rms error values:
    # noise / signal = 0.01 : 0.004+-0.001 px
    # noise / signal = 0.02 : 0.008+-0.002 px
    # noise / signal = 0.05 : 0.02+-0.004 px
    # noise / signal = 0.1  : 0.04+-0.008 px
    # noise / signal = 0.2  : 0.08+-0.016 px
    # noise / signal = 0.3  : 0.12+-0.024 px
    # noise / signal = 0.5  : 0.2+-0.04 px

    # Parameters are tweaked so that there is no deviation due to a too
    # small mask size. Noise/signal ratios up to 50% are tested.
    self.check_skip()
    draw_size = 4.5
    locate_diameter = 21
    N = 200
    noise_levels = (np.array([0.01, 0.02, 0.05, 0.1, 0.2, 0.3, 0.5]) *
                    (2**12 - 1)).astype(int)

    real_rms_dev = []
    eps = []
    actual_black_level = []
    actual_noise = []
    expected, image = draw_array(N, draw_size, bitdepth=12)
    for n, noise_level in enumerate(noise_levels):
        image_noisy = image + np.array(np.random.randint(0, noise_level, image.shape),
                                       dtype=image.dtype)
        f = tp.locate(image_noisy, locate_diameter, engine=self.engine,
                      topn=N, threshold=noise_level / 4)
        _, actual = sort_positions(f[['y', 'x']].values, expected)
        rms_dev = np.sqrt(np.mean(np.sum((actual - expected)**2, 1)))
        real_rms_dev.append(rms_dev)
        eps.append(f['ep'].mean())

        # Additionally test the measured noise
        black_level, noise = measure_noise(image, image_noisy,
                                           locate_diameter // 2)
        actual_black_level.append(black_level)
        actual_noise.append(noise)

    assert_allclose(actual_black_level, 1 / 2 * noise_levels, rtol=0.01, atol=1)
    assert_allclose(actual_noise, np.sqrt(1 / 12.) * noise_levels, rtol=0.01, atol=1)
    assert_allclose(real_rms_dev, eps, rtol=0.2, atol=0.001)
    assert_array_less(real_rms_dev, eps)
def test_oldmass_float(self):
    old_minmass = 5500
    im = draw_spots(self.shape, self.pos, self.size, bitdepth=8,
                    noise_level=50)
    im = (im / im.max()).astype(float)
    new_minmass = self.minmass_v02_to_v04(im, old_minmass)
    f = tp.locate(im, self.tp_diameter, minmass=new_minmass)
    assert len(f) == self.N
def locate(img: np.ndarray, diameter: int, minmass: int, maxmass: int,
           maxsize: float) -> pd.DataFrame:
    df = trackpy.locate(img, diameter=diameter, minmass=minmass, maxsize=maxsize)
    df = df[df['mass'] < maxmass]
    return df
def image_to_spots(
        self,
        data_image: np.ndarray,
) -> PerImageSliceSpotResults:
    """
    Parameters
    ----------
    data_image : np.ndarray
        three-dimensional image containing spots to be detected

    Returns
    -------
    PerImageSpotResults :
        includes a SpotAttributes DataFrame of metadata containing the coordinates,
        intensity and radius of each spot, as well as any extra information collected
        during spot finding.
    """
    data_image = np.asarray(data_image)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', FutureWarning)  # trackpy numpy indexing warning
        warnings.simplefilter('ignore', UserWarning)  # yielded if black images
        attributes = locate(
            data_image,
            diameter=self.diameter,
            minmass=self.minmass,
            maxsize=self.maxsize,
            separation=self.separation,
            noise_size=self.noise_size,
            smoothing_size=self.smoothing_size,
            threshold=self.threshold,
            percentile=self.percentile,
            preprocess=self.preprocess,
            max_iterations=self.max_iterations,
        )

    # when zero spots are detected, 'ep' is missing from the trackpy locate results.
    if attributes.shape[0] == 0:
        attributes['ep'] = []

    # TODO ambrosejcarr: data should always be at least pseudo-3d, this may not be necessary
    # TODO ambrosejcarr: this is where max vs. sum vs. mean would be parametrized.
    # here, total_intensity = sum, intensity = max
    new_colnames = [
        'y', 'x', 'total_intensity', 'radius', 'eccentricity', 'intensity', 'raw_mass', 'ep'
    ]
    if len(data_image.shape) == 3:
        attributes.columns = ['z'] + new_colnames
    else:
        attributes.columns = new_colnames
    attributes['spot_id'] = np.arange(attributes.shape[0])
    return PerImageSliceSpotResults(spot_attrs=SpotAttributes(attributes), extras=None)
def test_oldmass_16bit(self):
    old_minmass = 2800000
    im = draw_spots(self.shape, self.pos, self.draw_diameter, bitdepth=16,
                    noise_level=10000)
    new_minmass = tp.minmass_version_change(im, old_minmass,
                                            smoothing_size=self.tp_diameter)
    f = tp.locate(im, self.tp_diameter, minmass=new_minmass)
    assert len(f) == self.N
def test_oldmass_invert(self):
    old_minmass = 2800000
    im = draw_spots(self.shape, self.pos, self.size, bitdepth=12,
                    noise_level=500)
    im = (im.max() - im + 10000)
    new_minmass = self.minmass_v02_to_v04(im, old_minmass, invert=True)
    f = tp.locate(invert_image(im), self.tp_diameter, minmass=new_minmass)
    assert len(f) == self.N
def test_smoke_datatypes(self):
    self.check_skip()
    SHAPE = (300, 300)
    # simple "smoke" test to see if numba explodes
    dummy_image = np.random.randint(0, 100, SHAPE).astype(np.uint8)
    tp.locate(dummy_image, 5, engine=self.engine)
    tp.locate(dummy_image, 5, invert=True, engine=self.engine)

    # Check float types
    dummy_image = np.random.rand(*SHAPE)
    tp.locate(dummy_image, 5, engine=self.engine)
    tp.locate(dummy_image, 5, invert=True, engine=self.engine)
def test_oldmass_invert(self):
    old_minmass = 2800000
    im = draw_spots(self.shape, self.pos, self.draw_diameter, bitdepth=12,
                    noise_level=500)
    im = (im.max() - im + 10000)
    new_minmass = tp.minmass_version_change(im, old_minmass, invert=True,
                                            smoothing_size=self.tp_diameter)
    f = tp.locate(im, self.tp_diameter, minmass=new_minmass, invert=True)
    assert len(f) == self.N
def test_oldmass_float(self):
    old_minmass = 5500
    im = draw_spots(self.shape, self.pos, self.draw_diameter, bitdepth=8,
                    noise_level=50)
    im = (im / im.max()).astype(float)
    new_minmass = tp.minmass_version_change(im, old_minmass,
                                            smoothing_size=self.tp_diameter)
    f = tp.locate(im, self.tp_diameter, minmass=new_minmass)
    assert len(f) == self.N
def test_mass(self):
    # The mass calculated from the processed image should be independent
    # of added noise. Its absolute value is untested.
    # The mass calculated from the raw image should equal
    # noiseless mass + noise_size/2 * Npx_in_mask.
    self.check_skip()
    ndim = 2
    size = 3  # for drawing
    radius = 6  # for locate
    diameter = radius * 2 + 1
    N = 20
    shape = (128, 127)

    # Calculate the expected mass from a single spot using the set masksize
    center = (diameter,) * ndim
    spot = draw_spots((diameter * 2,) * ndim, [center], size, bitdepth=12)
    rect = [slice(c - radius, c + radius + 1) for c in center]
    mask = tp.masks.binary_mask(radius, 2)
    Npx = mask.sum()
    EXPECTED_MASS = (spot[rect] * mask).sum()

    # Generate feature locations and make the image
    expected = gen_nonoverlapping_locations(shape, N, diameter, diameter)
    expected = expected + np.random.random(expected.shape)
    N = expected.shape[0]
    image = draw_spots(shape, expected, size, bitdepth=12)

    # analyze the image without noise
    f = tp.locate(image, diameter, engine=self.engine, topn=N)
    PROCESSED_MASS = f['mass'].mean()
    assert_allclose(f['raw_mass'].mean(), EXPECTED_MASS, rtol=0.01)

    for n, noise in enumerate(np.arange(0.05, 0.8, 0.05)):
        noise_level = int((2**12 - 1) * noise)
        image_noisy = image + np.array(np.random.randint(0, noise_level, image.shape),
                                       dtype=image.dtype)
        f = tp.locate(image_noisy, radius * 2 + 1, engine=self.engine, topn=N)
        assert_allclose(f['mass'].mean(), PROCESSED_MASS, rtol=0.1)
        assert_allclose(f['raw_mass'].mean(),
                        EXPECTED_MASS + Npx * noise_level / 2, rtol=0.1)
def imageClassifier(frames, cell_size, min_mass, particle_separation):
    print("We have " + str(len(frames)) + " images in this batch.\n"
          "We will open them up one at a time.\n"
          " Feel free to save them and then exit the window to view the next one.")
    for frame in frames:
        f = tp.locate(frame, cell_size, invert=True, minmass=min_mass,
                      separation=particle_separation, noise_size=4)
        #locationWeightHistogram(f)
        plt.figure("Filename")  # make a new figure
        plt.xlabel('Number of cells: ' + str(len(f)))
        tp.annotate(f, frame)
        plt.savefig('./filename.png', format='png')  # save the figure to file
        print(f.head())
    return 1
def upload():
    # clear the DOWNLOAD directory
    for root, dirs, files in os.walk(app.config['DOWNLOAD_FOLDER']):
        for f in files:
            os.unlink(os.path.join(root, f))
        for d in dirs:
            shutil.rmtree(os.path.join(root, d))

    # clear the UPLOAD directory
    for root, dirs, files in os.walk(app.config['UPLOAD_FOLDER']):
        for f in files:
            os.unlink(os.path.join(root, f))
        for d in dirs:
            shutil.rmtree(os.path.join(root, d))

    # Get specifications
    uploaded_files = request.files.getlist("file[]")
    _invert = request.form['invert']
    _diameter = int(request.form['diameter'])
    _min_mass = float(request.form['minmass'])
    _noise_size = float(request.form['noise_size'])
    _smoothing_size = float(request.form['smoothing_size'])
    _separation = float(request.form['separation'])

    # list of files for use on upload.html
    filenames = []

    for file in uploaded_files:
        # Check if the file is one of the allowed types/extensions
        if file and allowed_file(file.filename):
            # Make the filename safe, remove unsupported chars
            filename = secure_filename(file.filename)
            # Save file to upload folder
            file.save(os.path.join(app.config['DOWNLOAD_FOLDER'], filename))
            # load the image frames
            frames = pims.ImageSequence(os.path.join(app.config['DOWNLOAD_FOLDER'], filename),
                                        as_grey=True)
            # for loop of 1 to deal with PIMS bug.
            for frame in frames:
                # locate features
                f = tp.locate(frames, _diameter, minmass=_min_mass,
                              separation=_separation, invert=_invert)
                # noise_size=_noise_size, smoothing_size=_smoothing_size,
                plt.ioff()  # interactive mode = off
                plt.figure(filename)  # make a new figure
                plt.title(filename)
                plt.xlabel('Number of cells: ' + str(len(f)))  # label axis
                tp.annotate(f, frame)  # display the image and the circle overlay
                # filename_png_extension = os.path.splitext(filename)[0] + ".png"
                plt.savefig(os.path.join(app.config['UPLOAD_FOLDER'], filename),
                            format='png')  # save the figure to filenames
                plt.close()  # close figure

            # Save the filename into a list, we'll use it later
            filenames.append(filename)
            print(filenames, file=sys.stderr)

    # Load an html page with a link to each uploaded file
    return render_template('upload.html', filenames=filenames)
def test_one_centered_gaussian(self):
    self.check_skip()
    L = 21
    dims = (L, L + 2)  # avoid square images in tests
    pos = np.array([7, 13])
    cols = ['x', 'y']
    expected = DataFrame(pos.reshape(1, -1), columns=cols)

    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos[::-1], 4)
    actual = tp.locate(image, 9, 1, preprocess=False, engine=self.engine)[cols]
    assert_allclose(actual, expected, atol=0.1)
def _create_nv_image(image, nv_size, created_pt_size=3):
    # Mark each feature located by trackpy as a small square of intensity 10
    # in an otherwise blank image of the same shape.
    y_len, x_len = image.shape
    f = tp.locate(image, nv_size)
    new_image = np.zeros((y_len, x_len))
    nv_locs = f.values
    for pt in nv_locs:
        for i in range(-created_pt_size, created_pt_size + 1):
            for j in range(-created_pt_size, created_pt_size + 1):
                if pt[1] + j < y_len and pt[1] - j >= 0 and pt[0] + i < x_len and pt[0] - i >= 0:
                    # trackpy returns float coordinates; cast to int for indexing
                    new_image[int(pt[1]) + j, int(pt[0]) + i] = 10
    return new_image
def test_one_centered_gaussian_3D(self):
    self.skip_numba()
    L = 21
    dims = (L, L + 2, L + 4)  # avoid square images in tests
    pos = [7, 13, 9]
    cols = ['x', 'y', 'z']
    expected = DataFrame(np.asarray(pos).reshape(1, -1), columns=cols)

    image = np.ones(dims, dtype='uint8')
    draw_gaussian_spot(image, pos, 4)
    actual = tp.locate(image, 9, 1, preprocess=False, engine=self.engine)[cols]
    assert_allclose(actual, expected, atol=0.1)
def get_params_locate(frame, diameter=15, minmass_percentile=92, out_fh=None,
                      test=True, figsize=None):
    f = tp.locate(frame, diameter, invert=False)
    minmass = np.percentile(f['mass'], minmass_percentile)
    logging.info('feature count= %s, %spercentile= %s' % (len(f), minmass_percentile, minmass))
    f = tp.locate(frame, diameter, invert=False,
                  minmass=np.percentile(f['mass'], minmass_percentile))
    logging.info('feature count= %s, %spercentile= %s'
                 % (len(f), minmass_percentile, np.percentile(f['mass'], minmass_percentile)))

    if test:
        logging.info('getting plots annotate')
        # plt.clf()
        fig = plt.figure(figsize=figsize)
        ax = plt.subplot(111)
        ax = tp.annotate(f, frame, ax=ax)
        if out_fh is not None:
            # plt.savefig('%s.annotate.pdf' % out_fh, format='pdf')
            plt.savefig('%s.annotate.svg' % out_fh, format='svg')
        # plt.clf()

        logging.info('getting plots hist')
        cols = ['mass', 'size', 'ecc', 'signal', 'raw_mass', 'ep']
        fig = plt.figure()
        ax = plt.subplot(111)
        _ = f.loc[:, cols].hist(ax=ax)
        if out_fh is not None:
            plt.savefig('%s.feature_props.svg' % out_fh, format='svg')
        # plt.clf()

        logging.info('getting plots bias')
        fig = plt.figure()
        tp.subpx_bias(f)
        if out_fh is not None:
            plt.savefig('%s.subpx_bias.svg' % out_fh, format='svg')
        # plt.clf()

    params_locate = {'diameter': diameter, 'minmass': minmass}
    return params_locate
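# Hedged follow-up sketch (an assumption, not in the original module): feed the
# parameters returned by get_params_locate into trackpy's batch processing. pims/trackpy
# are assumed to be imported as in the function above; the helper name is hypothetical.
def locate_all_frames(frames, diameter=15, minmass_percentile=92):
    params = get_params_locate(frames[0], diameter=diameter,
                               minmass_percentile=minmass_percentile, test=False)
    # tp.batch runs tp.locate over every frame with the chosen mass threshold.
    return tp.batch(frames, params['diameter'], minmass=params['minmass'])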
def test_one_centered_gaussian_3D_anisotropic(self):
    self.check_skip()
    L = 21
    dims = (L, L + 2, L + 4)  # avoid square images in tests
    pos = [7, 13, 9]
    cols = ['z', 'y', 'x']
    expected = DataFrame(np.asarray(pos).reshape(1, -1), columns=cols)

    image = np.ones(dims, dtype='uint8')
    draw_feature(image, pos, (21, 27, 27))
    actual = tp.locate(image, (7, 9, 9), 1, preprocess=False,
                       engine=self.engine)[cols]
    assert_allclose(actual, expected, atol=0.1)
def compare(shape, count, radius, noise_level, engine):
    radius = tp.utils.validate_tuple(radius, len(shape))
    # tp.locate ignores a margin of size radius, take 1 px more to be safe
    margin = tuple([r + 1 for r in radius])
    diameter = tuple([(r * 2) + 1 for r in radius])
    draw_range = tuple([d * 3 for d in diameter])
    cols = ['x', 'y', 'z'][:len(shape)][::-1]
    pos = gen_nonoverlapping_locations(shape, count, draw_range, margin)
    image = draw_spots(shape, pos, draw_range, noise_level)
    f = tp.locate(image, diameter, engine=engine)
    actual = f[cols].sort(cols)
    expected = DataFrame(pos, columns=cols).sort(cols)
    return actual, expected
def test_minmass_maxsize(self):
    # Test the mass- and size-based filtering here on 4 different features.
    self.check_skip()
    L = 64
    dims = (L, L + 2)
    cols = ['y', 'x']
    PRECISION = 1  # we are not testing for subpx precision here

    image = np.zeros(dims, dtype=np.uint8)
    pos1 = np.array([15, 20])
    pos2 = np.array([40, 40])
    pos3 = np.array([25, 45])
    pos4 = np.array([35, 15])
    draw_feature(image, pos1, 15)
    draw_feature(image, pos2, 30)
    draw_feature(image, pos3, 5)
    draw_feature(image, pos4, 20)

    # filter on mass
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       minmass=6500)[cols]
    actual = actual.sort(cols)
    expected = DataFrame([pos2, pos4], columns=cols).sort(cols)
    assert_allclose(actual, expected, atol=PRECISION)

    # filter on size
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       maxsize=3.0)[cols]
    actual = actual.sort(cols)
    expected = DataFrame([pos1, pos3], columns=cols).sort(cols)
    assert_allclose(actual, expected, atol=PRECISION)

    # filter on both mass and size
    actual = tp.locate(image, 15, engine=self.engine, preprocess=False,
                       minmass=600, maxsize=4.0)[cols]
    actual = actual.sort(cols)
    expected = DataFrame([pos1, pos4], columns=cols).sort(cols)
    assert_allclose(actual, expected, atol=PRECISION)
def test_characterize(self):
    df = tp.locate(self.v0_inverted, diameter=9)
    df = df[(df['x'] < 64) & (df['y'] < 64)]
    actual_coords = df[self.pos_columns].values
    actual_char = df[self.char_columns].values

    try:
        assert_allclose(actual_coords, self.expected_characterize[:, :2])
    except AssertionError:
        raise AssertionError('The characterize tests failed as the coords'
                             ' found by locate were not reproduced.')

    assert_allclose(actual_char, self.expected_characterize[:, 2:])
def compare(shape, count, radius, noise_level, **kwargs):
    radius = tp.utils.validate_tuple(radius, len(shape))
    # tp.locate ignores a margin of size radius, take 1 px more to be safe
    margin = tuple([r + 1 for r in radius])
    diameter = tuple([(r * 2) + 1 for r in radius])
    size = [d / 2 for d in diameter]
    separation = tuple([d * 3 for d in diameter])
    cols = ['x', 'y', 'z'][:len(shape)][::-1]
    pos = gen_nonoverlapping_locations(shape, count, separation, margin)
    image = draw_spots(shape, pos, size, noise_level)
    f = tp.locate(image, diameter, **kwargs)
    actual = pandas_sort(f[cols], cols)
    expected = pandas_sort(DataFrame(pos, columns=cols), cols)
    return actual, expected