# Assumed module-level imports, not shown in this excerpt: numpy as np, scipy.ndimage,
# plus the segmentation helpers threshold_image, propagate, fill_labeled_holes,
# filter_labels and separate_neighboring_objects (propagate and fill_labeled_holes
# match the helpers of the same name in CellProfiler's centrosome library).
def segment_secondary(self, marker_channel_name):
    # 1. load image and scale to [0, 1]
    img = self.load().astype(np.float64) / 255.0
    mask = np.ones(img.shape)

    # 2. seed primary objects: a small square around each marker, labelled with the
    # marker's pk (integer labels, so they can be used directly as indices below)
    markers = self.gon.composite.markers.filter(channel__name=marker_channel_name, track_instance__t=self.t)
    labels_in = np.zeros(img.shape, dtype=int)
    for marker in markers:
        labels_in[marker.r-3:marker.r+2, marker.c-3:marker.c+2] = marker.pk

    objects_segmented = labels_in.copy()

    # discard seeds that neither touch the image border nor appear in the edited
    # primary segmentation (objects_segmented stands in for that segmentation here)
    labels_touching_edge = np.hstack((labels_in[0, :], labels_in[-1, :], labels_in[:, 0], labels_in[:, -1]))
    labels_touching_edge = np.unique(labels_touching_edge)
    is_touching = np.zeros(np.max(labels_in) + 1, bool)
    is_touching[labels_touching_edge] = True
    is_touching = is_touching[labels_in]
    labels_in[(~is_touching) & (objects_segmented == 0)] = 0
    ### PRIMARY OBJECTS

    # 3. threshold the image to be segmented
    thresholded_image = threshold_image(img)

    # 4. actually do segmentation: propagate the seed labels through the thresholded
    # foreground, fill holes, and drop labels not present in the primary objects
    labels_out, distance = propagate(img, labels_in, thresholded_image, 0.01)
    small_removed_segmented_out = fill_labeled_holes(labels_out)
    segmented_out = filter_labels(small_removed_segmented_out, objects_segmented)

    return segmented_out
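# Sketch: threshold_image is called by both segmentation methods in this excerpt but
# is not defined here. The stand-in below assumes a simple global Otsu threshold via
# scikit-image; the project's actual helper may use a different strategy.
from skimage.filters import threshold_otsu  # assumed dependency for this sketch only

def threshold_image(img):
    # img is a float image scaled to [0, 1]; return a boolean foreground mask
    return img > threshold_otsu(img)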
def segment_primary(self, min_size, max_size):
    img = self.load().astype(np.float64) / 255.0

    # 1. threshold image
    binary_image = threshold_image(img)

    # 2. fill background holes in foreground objects
    def size_fn(size, is_foreground):
        return size < max_size

    binary_image = fill_labeled_holes(binary_image, size_fn=size_fn)

    # 3. perform recognition
    labeled_image, object_count = scipy.ndimage.label(binary_image, np.ones((3, 3), bool))
    (labeled_image, object_count, maxima_suppression_size,
     LoG_threshold, LoG_filter_diameter) = separate_neighboring_objects(
        img, labeled_image, object_count, min_size, max_size)

    # 4. fill holes again
    labeled_image = fill_labeled_holes(labeled_image)

    return labeled_image, object_count
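# Sketch: separate_neighboring_objects is not shown in this excerpt. Its name and the
# LoG/maxima-suppression values it returns suggest CellProfiler-style declumping of
# touching objects. The function below is only a rough illustration of that idea
# (distance-transform seeds plus watershed) using scikit-image, not the project's
# implementation; its name and signature are hypothetical.
import numpy as np
import scipy.ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

def declump_by_distance(labeled_image, min_size):
    # distance to background: its local maxima approximate object centres
    distance = scipy.ndimage.distance_transform_edt(labeled_image > 0)
    # one seed per local maximum, suppressing maxima closer together than min_size
    coords = peak_local_max(distance, min_distance=min_size, labels=labeled_image)
    seeds = np.zeros(distance.shape, dtype=int)
    seeds[tuple(coords.T)] = np.arange(1, len(coords) + 1)
    # watershed on the inverted distance transform splits touching objects
    return watershed(-distance, seeds, mask=labeled_image > 0)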