def run(self, workspace):
    """Identify secondary objects surrounding the input primary objects.

    Reads the grayscale image named by ``self.image_name`` and the primary
    objects named by ``self.x_name``, grows secondary objects from the
    primaries according to ``self.method`` (nearest-distance dilation,
    propagation, or gradient/intensity watershed), then adds the resulting
    objects and their parent/child measurements to the workspace.

    :param workspace: the current pipeline workspace; supplies the image
        set, object set and measurements, and receives display data.
    :raises ValueError: if the input image and the primary objects do not
        have the same shape.
    """
    image_name = self.image_name.value
    image = workspace.image_set.get_image(image_name, must_be_grayscale=True)
    workspace.display_data.statistics = []
    img = image.pixel_data
    mask = image.mask
    objects = workspace.object_set.get_objects(self.x_name.value)
    # The image and the primary label matrix must line up pixel-for-pixel.
    if img.shape != objects.shape:
        raise ValueError(
            "This module requires that the input image and object sets are the same size.\n"
            "The %s image and %s objects are not (%s vs %s).\n"
            "If they are paired correctly you may want to use the Resize, ResizeObjects or "
            "Crop module(s) to make them the same size." % (
                image_name,
                self.x_name.value,
                img.shape,
                objects.shape,
            ))
    global_threshold = None
    # Every method except nearest-distance (M_DISTANCE_N) needs a
    # thresholded image to delimit where secondary objects may grow.
    if self.method != M_DISTANCE_N:
        thresholded_image, global_threshold, sigma = self._threshold_image(
            image_name, workspace)
        workspace.display_data.global_threshold = global_threshold
        workspace.display_data.threshold_sigma = sigma
    #
    # Get the following labels:
    # * all edited labels
    # * labels touching the edge, including small removed
    #
    labels_in = objects.unedited_segmented.copy()
    labels_touching_edge = numpy.hstack(
        (labels_in[0, :], labels_in[-1, :], labels_in[:, 0], labels_in[:, -1]))
    labels_touching_edge = numpy.unique(labels_touching_edge)
    is_touching = numpy.zeros(numpy.max(labels_in) + 1, bool)
    is_touching[labels_touching_edge] = True
    is_touching = is_touching[labels_in]
    # Keep an unedited label only if it touches the border or survived
    # editing; everything else is cleared to background.
    labels_in[(~is_touching) & (objects.segmented == 0)] = 0
    #
    # Stretch the input labels to match the image size. If there's no
    # label matrix, then there's no label in that area.
    #
    if tuple(labels_in.shape) != tuple(img.shape):
        tmp = numpy.zeros(img.shape, labels_in.dtype)
        i_max = min(img.shape[0], labels_in.shape[0])
        j_max = min(img.shape[1], labels_in.shape[1])
        tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
        labels_in = tmp
    if self.method in (M_DISTANCE_B, M_DISTANCE_N):
        if self.method == M_DISTANCE_N:
            # Dilate each label into the background: every background
            # pixel within the configured distance takes the label of
            # its nearest object (via the EDT's nearest-index map).
            distances, (i, j) = scipy.ndimage.distance_transform_edt(
                labels_in == 0, return_indices=True)
            labels_out = numpy.zeros(labels_in.shape, int)
            dilate_mask = distances <= self.distance_to_dilate.value
            labels_out[dilate_mask] = labels_in[i[dilate_mask], j[dilate_mask]]
        else:
            # Distance-B: propagate through the thresholded foreground,
            # then clip at the dilation distance and restore the
            # original primary labels.
            labels_out, distances = centrosome.propagate.propagate(
                img, labels_in, thresholded_image, 1.0)
            labels_out[distances > self.distance_to_dilate.value] = 0
            labels_out[labels_in > 0] = labels_in[labels_in > 0]
        if self.fill_holes:
            label_mask = labels_out == 0
            small_removed_segmented_out = centrosome.cpmorphology.fill_labeled_holes(
                labels_out, mask=label_mask)
        else:
            small_removed_segmented_out = labels_out
        #
        # Create the final output labels by removing labels in the
        # output matrix that are missing from the segmented image
        #
        segmented_out = self.filter_labels(
            small_removed_segmented_out, objects, workspace)
    elif self.method == M_PROPAGATION:
        labels_out, distance = centrosome.propagate.propagate(
            img, labels_in, thresholded_image,
            self.regularization_factor.value)
        if self.fill_holes:
            label_mask = labels_out == 0
            small_removed_segmented_out = centrosome.cpmorphology.fill_labeled_holes(
                labels_out, mask=label_mask)
        else:
            small_removed_segmented_out = labels_out.copy()
        segmented_out = self.filter_labels(
            small_removed_segmented_out, objects, workspace)
    elif self.method == M_WATERSHED_G:
        #
        # First, apply the sobel filter to the image (both horizontal
        # and vertical). The filter measures gradient.
        #
        sobel_image = numpy.abs(scipy.ndimage.sobel(img))
        #
        # Combine the image mask and threshold to mask the watershed
        #
        watershed_mask = numpy.logical_or(thresholded_image, labels_in > 0)
        watershed_mask = numpy.logical_and(watershed_mask, mask)
        #
        # Perform the first watershed
        #
        labels_out = skimage.segmentation.watershed(
            connectivity=numpy.ones((3, 3), bool),
            image=sobel_image,
            markers=labels_in,
            mask=watershed_mask,
        )
        if self.fill_holes:
            label_mask = labels_out == 0
            small_removed_segmented_out = centrosome.cpmorphology.fill_labeled_holes(
                labels_out, mask=label_mask)
        else:
            small_removed_segmented_out = labels_out.copy()
        segmented_out = self.filter_labels(
            small_removed_segmented_out, objects, workspace)
    elif self.method == M_WATERSHED_I:
        #
        # invert the image so that the maxima are filled first
        # and the cells compete over what's close to the threshold
        #
        # NOTE(review): assumes img is scaled to [0, 1] — TODO confirm
        # against the image provider.
        inverted_img = 1 - img
        #
        # Same as above, but perform the watershed on the original image
        #
        watershed_mask = numpy.logical_or(thresholded_image, labels_in > 0)
        watershed_mask = numpy.logical_and(watershed_mask, mask)
        #
        # Perform the watershed
        #
        labels_out = skimage.segmentation.watershed(
            connectivity=numpy.ones((3, 3), bool),
            image=inverted_img,
            markers=labels_in,
            mask=watershed_mask,
        )
        if self.fill_holes:
            label_mask = labels_out == 0
            small_removed_segmented_out = centrosome.cpmorphology.fill_labeled_holes(
                labels_out, mask=label_mask)
        else:
            small_removed_segmented_out = labels_out
        segmented_out = self.filter_labels(
            small_removed_segmented_out, objects, workspace)
    if self.wants_discard_edge and self.wants_discard_primary:
        #
        # Make a new primary object
        #
        # For each primary label, find the secondary label it maps to
        # (0 if the secondary was discarded), then renumber survivors
        # consecutively from 1.
        lookup = scipy.ndimage.maximum(
            segmented_out,
            objects.segmented,
            list(range(numpy.max(objects.segmented) + 1)),
        )
        lookup = centrosome.cpmorphology.fixup_scipy_ndimage_result(lookup)
        lookup[0] = 0
        lookup[lookup != 0] = numpy.arange(numpy.sum(lookup != 0)) + 1
        segmented_labels = lookup[objects.segmented]
        segmented_out = lookup[segmented_out]
        new_objects = Objects()
        new_objects.segmented = segmented_labels
        if objects.has_unedited_segmented:
            new_objects.unedited_segmented = objects.unedited_segmented
        if objects.has_small_removed_segmented:
            new_objects.small_removed_segmented = objects.small_removed_segmented
        new_objects.parent_image = objects.parent_image
    #
    # Add the objects to the object set
    #
    objects_out = Objects()
    objects_out.unedited_segmented = small_removed_segmented_out
    objects_out.small_removed_segmented = small_removed_segmented_out
    objects_out.segmented = segmented_out
    objects_out.parent_image = image
    objname = self.y_name.value
    workspace.object_set.add_objects(objects_out, objname)
    object_count = numpy.max(segmented_out)
    #
    # Add measurements
    #
    measurements = workspace.measurements
    super(IdentifySecondaryObjects, self).add_measurements(workspace)
    #
    # Relate the secondary objects to the primary ones and record
    # the relationship.
    #
    children_per_parent, parents_of_children = objects.relate_children(
        objects_out)
    measurements.add_measurement(
        self.x_name.value,
        FF_CHILDREN_COUNT % objname,
        children_per_parent,
    )
    measurements.add_measurement(
        objname,
        FF_PARENT % self.x_name.value,
        parents_of_children,
    )
    image_numbers = (numpy.ones(len(parents_of_children), int) *
                     measurements.image_set_number)
    # Only record relationships for children that actually have a parent.
    has_parent = parents_of_children > 0
    measurements.add_relate_measurement(
        self.module_num,
        R_PARENT,
        self.x_name.value,
        self.y_name.value,
        image_numbers[has_parent],
        parents_of_children[has_parent],
        image_numbers[has_parent],
        numpy.arange(1, len(parents_of_children) + 1)[has_parent],
    )
    #
    # If primary objects were created, add them
    #
    if self.wants_discard_edge and self.wants_discard_primary:
        workspace.object_set.add_objects(
            new_objects, self.new_primary_objects_name.value)
        super(IdentifySecondaryObjects, self).add_measurements(
            workspace,
            input_object_name=self.x_name.value,
            output_object_name=self.new_primary_objects_name.value,
        )
        children_per_parent, parents_of_children = new_objects.relate_children(
            objects_out)
        measurements.add_measurement(
            self.new_primary_objects_name.value,
            FF_CHILDREN_COUNT % objname,
            children_per_parent,
        )
        measurements.add_measurement(
            objname,
            FF_PARENT % self.new_primary_objects_name.value,
            parents_of_children,
        )
    if self.show_window:
        object_area = numpy.sum(segmented_out > 0)
        # numpy.prod replaces numpy.product, which was removed in NumPy 2.0.
        workspace.display_data.object_pct = (
            100 * object_area / numpy.prod(segmented_out.shape))
        workspace.display_data.img = img
        workspace.display_data.segmented_out = segmented_out
        workspace.display_data.primary_labels = objects.segmented
        workspace.display_data.global_threshold = global_threshold
        workspace.display_data.object_count = object_count
def run(self, workspace):
    """Split or merge the input objects and store the results.

    Depending on ``self.relabel_option``, either splits disconnected
    pieces of each input object into separate objects, or merges objects
    by proximity (UNIFY_DISTANCE) or by a shared parent (UNIFY_PARENT).
    The relabeled objects are added to the object set and related back
    to the input objects via child-count and parent measurements.

    :param workspace: the current pipeline workspace; supplies the
        object set and measurements, and receives display data.
    """
    objects_name = self.objects_name.value
    objects = workspace.object_set.get_objects(objects_name)
    assert isinstance(objects, Objects)
    labels = objects.segmented
    if self.relabel_option == OPTION_SPLIT:
        # Split: relabel 8-connected components of the foreground, so
        # disconnected pieces that shared a label become distinct objects.
        output_labels, count = scipy.ndimage.label(
            labels > 0, numpy.ones((3, 3), bool))
    else:
        if self.merge_option == UNIFY_DISTANCE:
            mask = labels > 0
            if self.distance_threshold.value > 0:
                #
                # Take the distance transform of the reverse of the mask
                # and figure out what points are less than 1/2 of the
                # distance from an object.
                #
                d = scipy.ndimage.distance_transform_edt(~mask)
                mask = d < self.distance_threshold.value / 2 + 1
            # Objects whose (possibly dilated) masks touch end up in the
            # same connected component and therefore merge.
            output_labels, count = scipy.ndimage.label(
                mask, numpy.ones((3, 3), bool))
            # Restore background: keep only pixels of the original objects.
            output_labels[labels == 0] = 0
            if self.wants_image:
                # Optionally restrict merging using an intensity image.
                output_labels = self.filter_using_image(workspace, mask)
        elif self.merge_option == UNIFY_PARENT:
            parents_name = self.parent_object.value
            # Per-object parent numbers; assumes object n's parent is at
            # index n - 1 — consistent with 1-based object labels.
            parents_of = workspace.measurements[objects_name, "_".join(
                (C_PARENT, parents_name))]
            output_labels = labels.copy().astype(numpy.uint32)
            # Relabel each object pixel with its parent's number, so all
            # children of one parent share a single label.
            output_labels[labels > 0] = parents_of[labels[labels > 0] - 1]
            if self.merging_method == UM_CONVEX_HULL:
                # Fill each merged object out to its convex hull.
                ch_pts, n_pts = centrosome.cpmorphology.convex_hull(
                    output_labels)
                ijv = centrosome.cpmorphology.fill_convex_hulls(
                    ch_pts, n_pts)
                output_labels[ijv[:, 0], ijv[:, 1]] = ijv[:, 2]
    output_objects = Objects()
    output_objects.segmented = output_labels
    # Carry the auxiliary segmentations over, remapped to the new labels.
    if objects.has_small_removed_segmented:
        output_objects.small_removed_segmented = copy_labels(
            objects.small_removed_segmented, output_labels)
    if objects.has_unedited_segmented:
        output_objects.unedited_segmented = copy_labels(
            objects.unedited_segmented, output_labels)
    output_objects.parent_image = objects.parent_image
    workspace.object_set.add_objects(output_objects,
                                     self.output_objects_name.value)
    measurements = workspace.measurements
    add_object_count_measurements(
        measurements,
        self.output_objects_name.value,
        numpy.max(output_objects.segmented),
    )
    add_object_location_measurements(measurements,
                                     self.output_objects_name.value,
                                     output_objects.segmented)
    #
    # Relate the output objects to the input ones and record
    # the relationship.
    #
    children_per_parent, parents_of_children = objects.relate_children(
        output_objects)
    measurements.add_measurement(
        self.objects_name.value,
        FF_CHILDREN_COUNT % self.output_objects_name.value,
        children_per_parent,
    )
    measurements.add_measurement(
        self.output_objects_name.value,
        FF_PARENT % self.objects_name.value,
        parents_of_children,
    )
    if self.show_window:
        # Stash label matrices for the interactive display.
        workspace.display_data.orig_labels = objects.segmented
        workspace.display_data.output_labels = output_objects.segmented
        if self.merge_option == UNIFY_PARENT:
            workspace.display_data.parent_labels = workspace.object_set.get_objects(
                self.parent_object.value).segmented