import numpy as np
from scipy.ndimage import measurements


def peak_finder(X, thresh):
    """Simple peak finding algorithm.

    Parameters
    ----------
    X : array_like, shape (n_times,)
        Raw data trace.
    thresh : float
        Amplitude threshold.

    Returns
    -------
    peak_loc : array_like, shape (n_clusters,)
        Index of peak amplitudes.
    peak_mag : array_like, shape (n_clusters,)
        Magnitude of peak amplitudes.
    """
    ## Error-catching.
    assert X.ndim == 1

    ## Identify clusters.
    clusters, ix = measurements.label(X > thresh)

    ## Guard against the no-cluster case (np.concatenate raises on an
    ## empty sequence).
    if not ix:
        return np.array([]), np.array([])

    ## Identify index of peak amplitudes.
    peak_loc = np.concatenate(
        measurements.maximum_position(X, labels=clusters,
                                      index=np.arange(ix) + 1))

    ## Identify magnitude of peak amplitudes.
    peak_mag = measurements.maximum(X, labels=clusters,
                                    index=np.arange(ix) + 1)

    return peak_loc, peak_mag
import numpy as np
from scipy.ndimage import measurements


def peak_finder(arr, thresh):
    '''Absolute amplitude-based peak finding algorithm.

    Parameters
    ----------
    arr : array, shape=(n_samples,)
        1-d timeseries of extracellular recording.
    thresh : scalar
        Amplitude threshold. Only samples above this value will be
        included in clusters.

    Returns
    -------
    peak_loc : array
        Indices of cluster peaks.
    peak_mag : array
        Magnitude of cluster peaks.
    '''
    assert arr.ndim == 1

    clusters, ix = measurements.label(arr > thresh)
    if not ix:
        return np.array([]), np.array([])

    peak_loc = np.concatenate(
        measurements.maximum_position(arr, labels=clusters,
                                      index=np.arange(ix) + 1))
    peak_mag = measurements.maximum(arr, labels=clusters,
                                    index=np.arange(ix) + 1)

    return peak_loc, peak_mag
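# A minimal usage sketch for the peak finders above, on synthetic data
# (the trace and threshold here are illustrative, not from the source).
# Each run of samples above the threshold forms one cluster, and the
# position/value of the per-cluster maximum is returned.
import numpy as np

trace = np.array([0.0, 0.2, 1.5, 2.1, 0.3, 0.1, 1.8, 0.2])
locs, mags = peak_finder(trace, thresh=1.0)
# locs -> indices of the per-cluster maxima: [3, 6]
# mags -> corresponding amplitudes: [2.1, 1.8]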
import time

import numpy as np
from scipy.ndimage import label, measurements


def find_blob_centers(predictions, resolution, blob_prediction_threshold,
                      blob_size_threshold):

    # smooth out "U-Net noise"
    # print("Median-filtering prediction...")
    # predictions = median_filter(predictions, size=3)

    print("Finding blobs...")
    start = time.time()
    blobs = predictions > blob_prediction_threshold
    labels, num_blobs = label(blobs)
    print("%.3fs" % (time.time() - start))
    print("Found %d blobs" % num_blobs)

    print("Finding centers, sizes, and maximal values...")
    start = time.time()
    label_ids = np.arange(1, num_blobs + 1)
    centers = measurements.center_of_mass(blobs, labels, index=label_ids)
    sizes = measurements.sum(blobs, labels, index=label_ids)
    maxima = measurements.maximum(predictions, labels, index=label_ids)
    print("%.3fs" % (time.time() - start))

    centers = {
        label_id: {'center': center, 'score': max_value}
        for label_id, center, size, max_value
        in zip(label_ids, centers, sizes, maxima)
        if size >= blob_size_threshold
    }

    return (centers, labels)
import math
import time

import numpy as np
from scipy.ndimage import gaussian_filter, label, maximum_filter, measurements


def find_maxima(predictions, voxel_size, radius, sigma=None,
                min_score_threshold=0):
    '''Find all points that are maximal within a sphere of ``radius`` and
    are strictly higher than min_score_threshold. Optionally smooth the
    prediction with sigma.'''

    # smooth predictions
    if sigma is not None:
        print("Smoothing predictions...")
        sigma = tuple(float(s) / r for s, r in zip(sigma, voxel_size))
        print("voxel-sigma: %s" % (sigma,))
        start = time.time()
        predictions = gaussian_filter(predictions, sigma, mode='constant')
        print("%.3fs" % (time.time() - start))

    print("Finding maxima...")
    start = time.time()
    radius = tuple(
        int(math.ceil(float(ra) / re))
        for ra, re in zip(radius, voxel_size))
    print("voxel-radius: %s" % (radius,))

    # ``sphere`` is a footprint helper assumed to be defined elsewhere in
    # the module (see the sketch after this function).
    max_filtered = maximum_filter(predictions, footprint=sphere(radius))
    maxima = max_filtered == predictions
    print("%.3fs" % (time.time() - start))

    print("Applying NMS...")
    start = time.time()
    predictions_filtered = np.zeros_like(predictions)
    predictions_filtered[maxima] = predictions[maxima]
    print("%.3fs" % (time.time() - start))

    print("Finding blobs...")
    start = time.time()
    blobs = predictions_filtered > min_score_threshold
    labels, num_blobs = label(blobs, output=np.uint64)
    print("%.3fs" % (time.time() - start))
    print("Found %d points after NMS" % num_blobs)

    print("Finding centers, sizes, and maximal values...")
    start = time.time()
    label_ids = np.arange(1, num_blobs + 1)
    centers = measurements.center_of_mass(blobs, labels, index=label_ids)
    sizes = measurements.sum(blobs, labels, index=label_ids)
    maxima = measurements.maximum(predictions, labels, index=label_ids)
    print("%.3fs" % (time.time() - start))

    centers = {
        label_id: {'center': center, 'score': max_value}
        for label_id, center, size, max_value
        in zip(label_ids, centers, sizes, maxima)
    }

    return (centers, labels, predictions)
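# ``find_maxima`` above relies on a ``sphere`` footprint helper that is not
# part of the snippet. A minimal sketch, assuming it should return a boolean
# ellipsoidal footprint with the given per-axis voxel radius (suitable as a
# ``maximum_filter`` footprint):
import numpy as np


def sphere(radius):
    # open grids spanning [-r, r] along each axis
    grids = np.ogrid[tuple(slice(-r, r + 1) for r in radius)]
    # normalized squared distance; <= 1 inside the (an)isotropic ball
    dist = sum((g / float(max(r, 1))) ** 2 for g, r in zip(grids, radius))
    return dist <= 1.0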
import scipy.ndimage.measurements as spm


def measure(im, labels, num_objs, measurement_type='mean'):
    # note: range(1, num_objs) covers labels 1..num_objs-1; if num_objs is
    # the label count returned by label(), the last object is skipped
    if measurement_type == 'mean':
        res = spm.mean(im, labels, range(1, num_objs))
    elif measurement_type == 'max':
        res = spm.maximum(im, labels, range(1, num_objs))
    else:
        raise ValueError(
            'Unsupported measurement type: {}'.format(measurement_type))
    return res
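# Illustrative call (array values are made up): per-object means over a
# labelled image. Passing n + 1 makes range(1, num_objs) cover labels 1..n,
# compensating for the half-open range noted above.
import numpy as np
from scipy.ndimage import label

img = np.array([[1., 2., 0.],
                [0., 5., 0.],
                [0., 0., 3.]])
labels, n = label(img > 0)
means = measure(img, labels, n + 1, measurement_type='mean')
# means -> [2.666..., 3.0] for the two connected components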
import numpy as np
from scipy.ndimage import measurements


def amplitude_thresholding(x0, thresh):
    '''Simple peak finding algorithm.'''
    assert x0.ndim == 1

    clusters, ix = measurements.label(x0 > thresh)
    if not ix:
        # no samples above threshold
        return np.array([]), np.array([])

    peak_loc = np.concatenate(
        measurements.maximum_position(x0, labels=clusters,
                                      index=np.arange(ix) + 1))
    peak_mag = measurements.maximum(x0, labels=clusters,
                                    index=np.arange(ix) + 1)
    return peak_loc, peak_mag
import numpy as np
from scipy.ndimage import measurements


def _localize_process(data: tuple,
                      is_binary: bool = True,
                      use_labels: bool = False) -> np.ndarray:
    """Worker process for localizing and labelling objects.

    Volumetric data is usually of the format: t, z, x, y.

    Returns:
        Combined data in the form of an nx5 array (t, x, y, z, label),
        adding a z-dimension of uniform zero if one doesn't exist.
    """
    if use_labels:
        assert is_binary

    image, frame = data
    assert image.dtype in (np.uint8, np.uint16)

    if is_binary:
        labeled, n = measurements.label(image.astype(bool))
        idx = list(range(1, n + 1))
    else:
        labeled = image
        idx = [p for p in np.unique(labeled) if p > 0]

    # calculate the centroids
    centroids = np.array(
        measurements.center_of_mass(image, labels=labeled, index=idx))

    # if we're dealing with volumetric data, reorder so that z is last
    # (roll along axis 1, so each (z, x, y) row becomes (x, y, z))
    if image.ndim == 3:
        centroids = np.roll(centroids, -1, axis=1)

    localizations = np.zeros((centroids.shape[0], 5), dtype=np.uint16)
    localizations[:, 0] = frame  # time
    localizations[:, 1:centroids.shape[1] + 1] = centroids
    localizations[:, -1] = 0  # a label of zero is reserved for states

    # if we're not using labels from the segmentation, return here
    if not use_labels:
        return localizations

    # get the labels from the image data
    labels = np.array(measurements.maximum(image, labels=labeled, index=idx))
    localizations[:, -1] = labels - 1  # -1: a label of zero is used for states

    return localizations
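# Illustrative invocation (synthetic frame; the data below is an assumption,
# not from the source): localize connected components in a binary 2-d mask
# for frame 7. With use_labels=False the label column stays zero.
import numpy as np

mask = np.zeros((64, 64), dtype=np.uint8)
mask[10:14, 10:14] = 1
mask[40:44, 20:24] = 1
points = _localize_process((mask, 7))
# points -> two rows of (t, x, y, z, label), z left at zero for 2-d input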
def local_errors(self, threshold):
    # crop_region, patch_size, ERRORS_CROP and unique_nonzero are assumed
    # to be defined at module level
    subregion = crop_region(patch_size, ERRORS_CROP)
    unique_list = unique_nonzero(self.raw_labels[subregion])
    max_error_list = measurements.maximum(
        self.errors, self.raw_labels, unique_list)
    additional_segments = [
        unique_list[i] for i in range(len(unique_list))
        if max_error_list[i] > threshold or max_error_list[i] == 0.0
    ]
    additional_segments = [
        x for x in additional_segments
        if x != 0 and x not in self.parent.valid
    ]
    return additional_segments
import numpy
from scipy.ndimage import measurements


def dsm(parameters):
    """Generates a DSM by elevating groups of cells by a certain height.

    This requires an input array, the DTM, and a mask. The mask designates
    which cells of the DTM should be elevated in order to produce the DSM.
    Basically, the mask shows in which cells there are features with
    significant height, e.g. trees, buildings etc.

    The tricky part is to account for DTM slope when elevating a group of
    cells. If you simply add some height to the initial DTM then the
    features will be elevated parallel to the ground. Especially in the
    case of buildings, their roof is horizontal, regardless of the
    underlying DTM slope. To account for this, the algorithm initially
    labels the mask. As a result you get groups of cells which should all
    be elevated to the same height. Next, it finds the maximum height
    value of the underlying DTM for each blob. Finally, it assigns
    `max_blob_height + delta_height` to each blob cell.

    :param parameters['data'][0]: the base DTM
    :type parameters['data'][0]: numpy.array
    :param parameters['data'][1]: the mask of cells to elevate
    :type parameters['data'][1]: numpy.array with boolean/binary values
    :param parameters['delta_height']: single cell elevation value
    :type parameters['delta_height']: float or integer
    :return: numpy.array
    """
    dtm = parameters['data'][0]
    mask = parameters['data'][1]
    delta_height = parameters['delta_height']

    # label and find the max height of each blob
    labels, count = measurements.label(mask)
    max_heights = measurements.maximum(dtm, labels=labels,
                                       index=range(1, count + 1))

    # assign the max height to each blob cell; copy first so the initial
    # dtm values are not modified
    dsm = dtm.copy()
    for blob_id in range(1, count + 1):
        dsm[numpy.where(labels == blob_id)] = (max_heights[blob_id - 1] +
                                               delta_height)

    return dsm
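# A small worked example (values are illustrative): a sloped 3x3 DTM with a
# two-cell "building" mask. Both masked cells take the blob's maximum ground
# height plus delta_height, so the roof comes out flat despite the slope.
import numpy

dtm_in = numpy.array([[1., 2., 3.],
                      [1., 2., 3.],
                      [1., 2., 3.]])
mask_in = numpy.array([[0, 1, 1],
                       [0, 0, 0],
                       [0, 0, 0]])
out = dsm({'data': [dtm_in, mask_in], 'delta_height': 10.0})
# out[0, 1] == out[0, 2] == 3.0 + 10.0; all other cells keep the DTM value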
def detect(self):
    """A method for detecting peaks, trying to speed things up!

    Run this after running the thresholding (which sets the pk_mask)!
    """
    self.pos = []
    self.intens = []

    sz = self.pk_par["sz"]
    sig_G = self.pk_par["sig_G"]
    min_conn = self.pk_par["min_conn"]
    max_conn = self.pk_par["max_conn"]
    peak_COM = self.pk_par["peak_COM"]

    self.lab_img, _ = measurements.label(self.img * self.pk_mask)
    obs = measurements.find_objects(self.lab_img)
    for i, (sy, sx) in enumerate(obs):
        lab_idx = i + 1

        # expand the object's bounding box by sz, clipped to the image
        y1 = max(0, sy.start - sz)
        y2 = min(self.img_sh[0], sy.stop + sz)
        x1 = max(0, sx.start - sz)
        x2 = min(self.img_sh[1], sx.stop + sz)

        lab = self.lab_img[y1:y2, x1:x2]
        nconn = np.sum(lab == lab_idx)
        if nconn < min_conn:
            continue
        if nconn > max_conn:
            continue

        pix = self.img[y1:y2, x1:x2]
        self.intens.append(measurements.maximum(pix, lab, lab_idx))
        if peak_COM:
            y, x = measurements.center_of_mass(pix, lab, lab_idx)
        else:
            y, x = measurements.maximum_position(pix, lab, lab_idx)
        self.pos.append((y + y1, x + x1))

    self.intens = np.array(self.intens)
import time

import numpy as np
from scipy.ndimage import label, measurements


def find_max_points(predictions, resolution, radius, sigma=None,
                    min_score_threshold=0):
    '''Find all points that are maximal within a sphere of ``radius`` and
    are strictly higher than min_score_threshold. Optionally smooth the
    prediction with sigma.'''

    # smooth predictions (``smooth``, ``find_maxima`` and ``apply_nms`` are
    # helpers assumed to be defined elsewhere in the module)
    if sigma is not None:
        predictions = smooth(predictions, resolution, sigma)

    maxima = find_maxima(predictions, resolution, radius)
    predictions_filtered = apply_nms(predictions, maxima)

    print("Finding blobs...")
    start = time.time()
    blobs = predictions_filtered > min_score_threshold
    labels, num_blobs = label(blobs)
    print("%.3fs" % (time.time() - start))
    print("Found %d blobs after NMS" % num_blobs)

    print("Finding centers, sizes, and maximal values...")
    start = time.time()
    label_ids = np.arange(1, num_blobs + 1)
    centers = measurements.center_of_mass(blobs, labels, index=label_ids)
    sizes = measurements.sum(blobs, labels, index=label_ids)
    maxima = measurements.maximum(predictions, labels, index=label_ids)
    print("%.3fs" % (time.time() - start))

    centers = {
        str(label_id): {'center': center, 'score': max_value}
        for label_id, center, size, max_value
        in zip(label_ids, centers, sizes, maxima)
    }

    return (centers, labels)
import scipy.ndimage as ndim


def roi_max_counts(images_sets, label_array):
    """Return the brightest pixel in any ROI in any image in the image set.

    Parameters
    ----------
    images_sets : array
        Iterable of 4D arrays; shape is (len(images_sets), ).
    label_array : array
        Labeled array; 0 is background. Each ROI is represented by a
        distinct label (i.e., integer).

    Returns
    -------
    max_counts : int
        Maximum pixel counts.
    """
    max_cts = 0
    for img_set in images_sets:
        for img in img_set:
            max_cts = max(max_cts, ndim.maximum(img, label_array))
    return max_cts
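# Illustrative call (synthetic data; the docstring describes 4D arrays, but
# this toy uses 3D stacks of 2D frames, which exercises the same loop): two
# image stacks share one ROI label image, and the brightest ROI pixel across
# all frames is returned.
import numpy as np

rois = np.zeros((8, 8), dtype=int)
rois[2:4, 2:4] = 1
rois[5:7, 5:7] = 2
stacks = [np.random.rand(4, 8, 8), np.random.rand(3, 8, 8)]
peak = roi_max_counts(stacks, rois)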
import numpy as np
import scipy.ndimage.measurements as spm


def max_clustering(Response, Mask, r=10):
    """Local max clustering pixel aggregation for nuclear segmentation.
    Takes as input a constrained log or other filtered nuclear image, a
    binary nuclear mask, and a clustering radius. For each pixel in the
    nuclear mask, the local max is identified. A hierarchy of local maxima
    is defined, and the root nodes used to define the label image.

    Parameters
    ----------
    Response : array_like
        A filtered-smoothed image where the maxima correspond to nuclear
        center. Typically obtained by constrained-LoG filtering on a
        hematoxylin intensity image obtained from ColorDeconvolution.
    Mask : array_like
        A binary mask of type boolean where nuclei pixels have value
        'True', and non-nuclear pixels have value 'False'.
    r : float
        A scalar defining the clustering radius. Default value = 10.

    Returns
    -------
    Label : array_like
        Label image where positive values correspond to foreground pixels
        that share mutual sinks.
    Seeds : array_like
        An N x 2 array defining the (x,y) coordinates of nuclei seeds.
    Maxima : array_like
        An N x 1 array containing the maximum response value corresponding
        to 'Seeds'.

    See Also
    --------
    histomicstk.filters.shape.clog

    References
    ----------
    .. [1] XW. Wu et al "The local maximum clustering method and its
       application in microarray gene expression data analysis," EURASIP
       J. Appl. Signal Processing, volume 2004, no.1, pp.53-63, 2004.
    .. [2] Y. Al-Kofahi et al "Improved Automatic Detection and
       Segmentation of Cell Nuclei in Histopathology Images" in IEEE
       Transactions on Biomedical Engineering, vol.57, no.4, pp.847-52,
       2010.
    """
    # check type of input mask
    if Mask.dtype != np.dtype("bool"):
        raise TypeError("Input 'Mask' must be a bool")

    # define kernel for max filter
    Kernel = np.zeros((2 * r + 1, 2 * r + 1), dtype=bool)
    X, Y = np.meshgrid(np.linspace(0, 2 * r, 2 * r + 1),
                       np.linspace(0, 2 * r, 2 * r + 1))
    X -= r
    Y -= r
    Kernel[(X ** 2 + Y ** 2) ** 0.5 <= r] = True

    # define linear coordinates of positive kernel entries
    X = X[Kernel].astype(int)
    Y = Y[Kernel].astype(int)

    # pad input array to simplify filtering
    I = Response.min() * np.ones((Response.shape[0] + 2 * r,
                                  Response.shape[1] + 2 * r))
    MaskedResponse = Response.copy()
    MaskedResponse[~Mask] = Response.min()
    I[r:r + Response.shape[0], r:r + Response.shape[1]] = MaskedResponse

    # initialize coordinate arrays and max value arrays
    Max = np.zeros(I.shape)
    Row = np.zeros(I.shape, dtype=int)
    Col = np.zeros(I.shape, dtype=int)

    # define pixels for local neighborhoods
    py, px = np.nonzero(Mask)
    py = py + int(r)
    px = px + int(r)

    # perform max filtering
    for i in np.arange(0, px.size, 1):
        # calculate local max value and position around px[i], py[i]
        Index = np.argmax(I[py[i] + Y, px[i] + X])
        Max[py[i], px[i]] = I[py[i] + Y[Index], px[i] + X[Index]]
        Row[py[i], px[i]] = py[i] + Y[Index] - r
        Col[py[i], px[i]] = px[i] + X[Index] - r

    # trim outputs
    Max = Max[r:Response.shape[0] + r, r:Response.shape[1] + r]
    Row = Row[r:Response.shape[0] + r, r:Response.shape[1] + r]
    Col = Col[r:Response.shape[0] + r, r:Response.shape[1] + r]

    # subtract out padding offset for px, py
    py = py - r
    px = px - r

    # identify connected regions of local maxima and define their seeds
    Label = spm.label((Response == Max) & Mask)[0]
    Seeds = np.array(spm.center_of_mass(Response, Label,
                                        np.arange(1, Label.max() + 1)))
    Seeds = np.round(Seeds).astype(np.uint32)

    # capture maxima for each connected region
    Maxima = spm.maximum(Response, Label, np.arange(1, Label.max() + 1))

    # handle seeds lying outside non-convex objects
    Fix = np.nonzero(Label[Seeds[:, 0].astype(np.uint32),
                           Seeds[:, 1].astype(np.uint32)] !=
                     np.arange(1, Label.max() + 1))[0]
    if Fix.size > 0:
        Locations = spm.find_objects(Label)
        for i in np.arange(Fix.size):
            Patch = Label[Locations[Fix[i]]]
            Pixels = np.nonzero(Patch)
            dX = Pixels[1] - (Seeds[Fix[i], 1] - Locations[Fix[i]][1].start)
            dY = Pixels[0] - (Seeds[Fix[i], 0] - Locations[Fix[i]][0].start)
            Dist = (dX ** 2 + dY ** 2) ** 0.5
            NewSeed = np.argmin(Dist)
            Seeds[Fix[i], 1] = np.array(
                Locations[Fix[i]][1].start + Pixels[1][NewSeed]
            ).astype(np.uint32)
            Seeds[Fix[i], 0] = np.array(
                Locations[Fix[i]][0].start + Pixels[0][NewSeed]
            ).astype(np.uint32)

    # initialize tracking and segmentation masks
    Tracked = np.zeros(Max.shape, dtype=bool)
    Tracked[Label > 0] = True

    # track each pixel and update
    for i in np.arange(0, px.size, 1):

        # initialize tracking trajectory
        Id = 0
        Alloc = 1
        Trajectory = np.zeros((1000, 2), dtype=int)
        Trajectory[0, 0] = px[i]
        Trajectory[0, 1] = py[i]

        while not Tracked[Trajectory[Id, 1], Trajectory[Id, 0]]:

            # increment trajectory counter
            Id += 1

            # if overflow, copy and reallocate
            if Id == 1000 * Alloc:
                temp = Trajectory
                Trajectory = np.zeros((1000 * (Alloc + 1), 2), dtype=int)
                Trajectory[0:1000 * Alloc, :] = temp
                Alloc += 1

            # add local max to trajectory
            Trajectory[Id, 0] = Col[Trajectory[Id - 1, 1],
                                    Trajectory[Id - 1, 0]]
            Trajectory[Id, 1] = Row[Trajectory[Id - 1, 1],
                                    Trajectory[Id - 1, 0]]

        # label sequence and add to tracked list
        Tracked[Trajectory[0:Id, 1], Trajectory[0:Id, 0]] = True
        Label[Trajectory[0:Id, 1], Trajectory[0:Id, 0]] = \
            Label[Trajectory[Id, 1], Trajectory[Id, 0]]

    # return
    return Label, Seeds, Maxima
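# Hedged usage sketch for max_clustering (synthetic response; the parameter
# values are illustrative, not from the source): smooth random noise into a
# blobby response, threshold it into a mask, and cluster into nuclei labels.
import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(0)
resp = gaussian_filter(rng.random((64, 64)), sigma=4)
mask = resp > resp.mean()
lbl, seeds, maxima = max_clustering(resp, mask, r=5)
# lbl: label image; seeds: (row, col) seed coordinates; maxima: peak values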
import numpy as np
from pandas import DataFrame, concat
from scipy.ndimage import measurements


def compute_fixations(aligned, times, labels=None):
    """Compute fixations from aligned timeseries. Fixations are defined as
    contiguous samples of eyetracking data aligned to the same AoI.

    Parameters
    ----------
    aligned : array, shape (n_trials, n_times)
        Eyetracking timeseries aligned to areas of interest.
    times : array, shape (n_times,)
        Time vector in seconds.
    labels : list
        List of areas of interest to include in processing. Defaults to
        all non-zero values in aligned.

    Returns
    -------
    fixations : pd.DataFrame
        Pandas DataFrame where each row details the (Trial, AoI, Onset,
        Offset, Duration) of the fixation.

    Notes
    -----
    Currently supports only monocular data. In the case of binocular data,
    the user can simply pass the aligned object twice (once per eye).
    """
    ## Error-catching.
    assert np.ndim(aligned) == 2
    assert np.shape(aligned)[-1] == np.size(times)

    ## Define labels list.
    if labels is None:
        labels = [i for i in np.unique(aligned) if i]

    ## Append extra timepoint to end of each trial. This prevents clusters
    ## across successive trials.
    n_trials, n_times = aligned.shape
    aligned = np.hstack([aligned, np.zeros((n_trials, 1))]).flatten()

    ## Precompute trial info.
    trials = np.repeat(np.arange(n_trials), n_times + 1) + 1
    times = np.broadcast_to(np.append(times, 0),
                            (n_trials, n_times + 1)).flatten()

    ## Collect per-label fixations.
    columns = ('Trial', 'AoI', 'Onset', 'Offset')
    dfs = []

    for label in labels:

        ## Identify clusters.
        clusters, n_clusters = measurements.label(aligned == label)

        ## Identify cluster info.
        trial = measurements.minimum(trials, labels=clusters,
                                     index=np.arange(n_clusters) + 1)
        onset = measurements.minimum(times, labels=clusters,
                                     index=np.arange(n_clusters) + 1)
        offset = measurements.maximum(times, labels=clusters,
                                      index=np.arange(n_clusters) + 1)

        ## Append to DataFrame.
        dat = np.column_stack(
            (trial, np.ones_like(trial) * label, onset, offset))
        dfs.append(DataFrame(dat, columns=columns))

    ## DataFrame.append was removed in pandas 2.0; concatenate instead.
    df = concat(dfs, ignore_index=True)
    df = df.sort_values(['Trial', 'Onset']).reset_index(drop=True)
    df['Duration'] = df.Offset - df.Onset

    return df
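# Minimal usage sketch (toy data, not from the source): two trials of three
# samples each, AoI codes 0 (none), 1 and 2. Each contiguous run of one AoI
# code within a trial becomes one fixation row.
import numpy as np

aligned_toy = np.array([[1, 1, 2],
                        [0, 2, 2]])
times_toy = np.array([0.0, 0.1, 0.2])
fixations = compute_fixations(aligned_toy, times_toy)
# e.g. Trial 1 / AoI 1 spans Onset 0.0 to Offset 0.1 (Duration 0.1)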
def get_feature_snr(self, segment):
    """Return the segment's peak value divided by the image RMS noise."""
    return (measurements.maximum(self.original_image.data, self.labels,
                                 segment.get_segmentid()) /
            self.rms_noise)