def run(self, workspace):
    """Ask the UI (via an interaction request) for a labels matrix and
    record it as a new object set with count/location measurements.

    workspace - provides the image set, object set, measurements and
        the interaction_request/cancel_request plumbing.
    """
    image_name = self.image_name.value
    objects_name = self.objects_name.value
    image = workspace.image_set.get_image(image_name)
    pixel_data = image.pixel_data
    # Hand the pixels to whatever front end is listening; it returns a
    # labels matrix, or None if the user cancelled.
    labels = workspace.interaction_request(
        self, pixel_data, workspace.measurements.image_set_number)
    if labels is None:
        # User cancelled. Soldier on as best we can: tell the listener
        # to stop, and substitute an empty (all-background) labeling so
        # downstream measurements still get written.
        workspace.cancel_request()
        labels = np.zeros(pixel_data.shape[:2], int)
    objects = cpo.Objects()
    objects.segmented = labels
    workspace.object_set.add_objects(objects, objects_name)
    ##################
    #
    # Add measurements
    #
    m = workspace.measurements
    #
    # The object count (max label value; assumes labels are numbered
    # consecutively from 1 -- TODO confirm for the interactive editor)
    #
    object_count = np.max(labels)
    I.add_object_count_measurements(m, objects_name, object_count)
    #
    # The object locations
    #
    I.add_object_location_measurements(m, objects_name, labels)
    # Stash data for the display handler.
    workspace.display_data.labels = labels
    workspace.display_data.pixel_data = pixel_data
def run(self, workspace):
    '''Find the outlines on the current image set

    workspace - The workspace contains
        pipeline - instance of cpp for this run
        image_set - the images in the image set being processed
        object_set - the objects (labeled masks) in this image set
        measurements - the measurements for this run
        frame - the parent frame to whatever frame is created.
                None means don't draw.
    '''
    gridding = workspace.get_grid(self.grid_name.value)
    # Dispatch on the configured grid-cell shape.
    # NOTE(review): if shape_choice matches none of these constants,
    # `labels` is unbound and the code below raises NameError -- the
    # setting presumably only offers these four choices; verify.
    if self.shape_choice == SHAPE_RECTANGLE:
        labels = self.run_rectangle(workspace, gridding)
    elif self.shape_choice == SHAPE_CIRCLE_FORCED:
        labels = self.run_forced_circle(workspace, gridding)
    elif self.shape_choice == SHAPE_CIRCLE_NATURAL:
        labels = self.run_natural_circle(workspace, gridding)
    elif self.shape_choice == SHAPE_NATURAL:
        labels = self.run_natural(workspace, gridding)
    objects = cpo.Objects()
    objects.segmented = labels
    # Every grid cell counts as one object, even if empty.
    object_count = gridding.rows * gridding.columns
    workspace.object_set.add_objects(objects,
                                     self.output_objects_name.value)
    add_object_location_measurements(workspace.measurements,
                                     self.output_objects_name.value,
                                     labels, object_count)
    add_object_count_measurements(workspace.measurements,
                                  self.output_objects_name.value,
                                  object_count)
    if self.wants_outlines:
        # Outline the binary foreground mask and publish it as an image.
        outlines = outline(labels != 0)
        outline_image = cpi.Image(outlines)
        workspace.image_set.add(self.outlines_name.value, outline_image)
def run(self, workspace):
    """Expand or shrink the input objects and save the result.

    Applies do_labels() to the input labels (and to the small-removed /
    unedited auxiliary label planes when present), registers the new
    objects and records count/location measurements, plus an optional
    outline image and display data.
    """
    input_objects = workspace.object_set.get_objects(self.object_name.value)
    output_objects = cpo.Objects()
    output_objects.segmented = self.do_labels(input_objects.segmented)
    # Auxiliary planes are only carried over for operations outside the
    # expand/divide family -- presumably those operations would make the
    # planes inconsistent with the output; TODO confirm.
    if (input_objects.has_small_removed_segmented and
            self.operation not in (O_EXPAND, O_EXPAND_INF, O_DIVIDE)):
        output_objects.small_removed_segmented = \
            self.do_labels(input_objects.small_removed_segmented)
    if (input_objects.has_unedited_segmented and
            self.operation not in (O_EXPAND, O_EXPAND_INF, O_DIVIDE)):
        output_objects.unedited_segmented = \
            self.do_labels(input_objects.unedited_segmented)
    workspace.object_set.add_objects(output_objects,
                                     self.output_object_name.value)
    # Count is taken as the maximum label value.
    add_object_count_measurements(workspace.measurements,
                                  self.output_object_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(workspace.measurements,
                                     self.output_object_name.value,
                                     output_objects.segmented)
    if self.wants_outlines.value:
        # Binary outline image, cropped like the input's parent image.
        outline_image = cpi.Image(outline(output_objects.segmented) > 0,
                                  parent_image=input_objects.parent_image)
        workspace.image_set.add(self.outlines_name.value, outline_image)
    if self.show_window:
        workspace.display_data.input_objects_segmented = input_objects.segmented
        workspace.display_data.output_objects_segmented = output_objects.segmented
def run(self, workspace):
    """Expand or shrink the input objects and save the result.

    Same flow as the sibling variant above: transform the labels with
    do_labels(), carry auxiliary label planes where applicable, then
    record objects, measurements, optional outlines and display data.
    """
    input_objects = workspace.object_set.get_objects(self.object_name.value)
    output_objects = cpo.Objects()
    output_objects.segmented = self.do_labels(input_objects.segmented)
    # Auxiliary planes skipped for expand/divide operations.
    if (input_objects.has_small_removed_segmented and
            self.operation not in (O_EXPAND, O_EXPAND_INF, O_DIVIDE)):
        output_objects.small_removed_segmented = \
            self.do_labels(input_objects.small_removed_segmented)
    if (input_objects.has_unedited_segmented and
            self.operation not in (O_EXPAND, O_EXPAND_INF, O_DIVIDE)):
        output_objects.unedited_segmented = \
            self.do_labels(input_objects.unedited_segmented)
    workspace.object_set.add_objects(output_objects,
                                     self.output_object_name.value)
    # Count taken as the maximum label value.
    add_object_count_measurements(workspace.measurements,
                                  self.output_object_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(workspace.measurements,
                                     self.output_object_name.value,
                                     output_objects.segmented)
    if self.wants_outlines.value:
        outline_image = cpi.Image(outline(output_objects.segmented) > 0,
                                  parent_image = input_objects.parent_image)
        workspace.image_set.add(self.outlines_name.value, outline_image)
    if self.show_window:
        workspace.display_data.input_objects_segmented = input_objects.segmented
        workspace.display_data.output_objects_segmented = output_objects.segmented
def run(self, workspace):
    '''Find the outlines on the current image set

    workspace - The workspace contains
        pipeline - instance of cpp for this run
        image_set - the images in the image set being processed
        object_set - the objects (labeled masks) in this image set
        measurements - the measurements for this run
        frame - the parent frame to whatever frame is created.
                None means don't draw.
    '''
    gridding = workspace.get_grid(self.grid_name.value)
    # Dispatch on the configured grid-cell shape.
    # NOTE(review): `labels` stays unbound if none of the four shape
    # constants match -- presumably the setting restricts the choices.
    if self.shape_choice == SHAPE_RECTANGLE:
        labels = self.run_rectangle(workspace, gridding)
    elif self.shape_choice == SHAPE_CIRCLE_FORCED:
        labels = self.run_forced_circle(workspace, gridding)
    elif self.shape_choice == SHAPE_CIRCLE_NATURAL:
        labels = self.run_natural_circle(workspace, gridding)
    elif self.shape_choice == SHAPE_NATURAL:
        labels = self.run_natural(workspace, gridding)
    objects = cpo.Objects()
    objects.segmented = labels
    # One object per grid cell, occupied or not.
    object_count = gridding.rows * gridding.columns
    workspace.object_set.add_objects(objects,
                                     self.output_objects_name.value)
    add_object_location_measurements(workspace.measurements,
                                     self.output_objects_name.value,
                                     labels, object_count)
    add_object_count_measurements(workspace.measurements,
                                  self.output_objects_name.value,
                                  object_count)
    if self.show_window:
        workspace.display_data.gridding = gridding
        workspace.display_data.labels = labels
def run(self, workspace):
    """Expand or shrink the input objects and save the result.

    Older-style variant: draws its own two-panel figure directly when a
    GUI frame exists instead of stashing display data.
    """
    input_objects = workspace.object_set.get_objects(self.object_name.value)
    output_objects = cpo.Objects()
    output_objects.segmented = self.do_labels(input_objects.segmented)
    # Auxiliary planes skipped for expand/divide operations.
    if (input_objects.has_small_removed_segmented and
            self.operation not in (O_EXPAND, O_EXPAND_INF, O_DIVIDE)):
        output_objects.small_removed_segmented = \
            self.do_labels(input_objects.small_removed_segmented)
    if (input_objects.has_unedited_segmented and
            self.operation not in (O_EXPAND, O_EXPAND_INF, O_DIVIDE)):
        output_objects.unedited_segmented = \
            self.do_labels(input_objects.unedited_segmented)
    workspace.object_set.add_objects(output_objects,
                                     self.output_object_name.value)
    # Count taken as the maximum label value.
    add_object_count_measurements(workspace.measurements,
                                  self.output_object_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(workspace.measurements,
                                     self.output_object_name.value,
                                     output_objects.segmented)
    if self.wants_outlines.value:
        outline_image = cpi.Image(outline(output_objects.segmented) > 0,
                                  parent_image = input_objects.parent_image)
        workspace.image_set.add(self.outlines_name.value, outline_image)
    if workspace.frame is not None:
        # Side-by-side input/output label displays sharing axes.
        figure = workspace.create_or_find_figure(
            title="ExpandOrShrinkObjects, image cycle #%d"%(
                workspace.measurements.image_set_number),subplots=(2,1))
        figure.subplot_imshow_labels(0,0,input_objects.segmented,
                                     self.object_name.value)
        figure.subplot_imshow_labels(1,0,output_objects.segmented,
                                     self.output_object_name.value,
                                     sharex = figure.subplot(0,0),
                                     sharey = figure.subplot(0,0))
def run(self, workspace):
    """Split or unify the input objects and save the relabeled result.

    OPTION_SPLIT relabels 8-connected components of the foreground;
    otherwise objects are unified either by distance (components of a
    dilated mask) or by sharing a parent object. Records objects,
    count/location measurements, parent/child relationships, optional
    outlines and display data.
    """
    objects = workspace.object_set.get_objects(self.objects_name.value)
    assert isinstance(objects, cpo.Objects)
    labels = objects.segmented
    if self.relabel_option == OPTION_SPLIT:
        # 8-connectivity: np.ones((3, 3)) structuring element.
        output_labels, count = scind.label(labels > 0, np.ones((3, 3), bool))
    else:
        if self.unify_option == UNIFY_DISTANCE:
            mask = labels > 0
            if self.distance_threshold.value > 0:
                #
                # Take the distance transform of the reverse of the mask
                # and figure out what points are less than 1/2 of the
                # distance from an object.
                #
                d = scind.distance_transform_edt(~mask)
                mask = d < self.distance_threshold.value / 2 + 1
            output_labels, count = scind.label(mask, np.ones((3, 3), bool))
            # Restrict labels to the original foreground.
            output_labels[labels == 0] = 0
            if self.wants_image:
                output_labels = self.filter_using_image(workspace, mask)
        elif self.unify_option == UNIFY_PARENT:
            # Objects sharing a parent take the parent's label.
            # NOTE(review): output_labels is unbound if unify_option is
            # neither constant -- presumably the setting forbids that.
            parent_objects = workspace.object_set.get_objects(self.parent_object.value)
            output_labels = parent_objects.segmented.copy()
            output_labels[labels == 0] = 0
    output_objects = cpo.Objects()
    output_objects.segmented = output_labels
    # Propagate auxiliary label planes, re-mapped to the new labeling.
    if objects.has_small_removed_segmented:
        output_objects.small_removed_segmented = copy_labels(objects.small_removed_segmented, output_labels)
    if objects.has_unedited_segmented:
        output_objects.unedited_segmented = copy_labels(objects.unedited_segmented, output_labels)
    output_objects.parent_image = objects.parent_image
    workspace.object_set.add_objects(output_objects, self.output_objects_name.value)
    measurements = workspace.measurements
    add_object_count_measurements(measurements,
                                  self.output_objects_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(measurements,
                                     self.output_objects_name.value,
                                     output_objects.segmented)
    #
    # Relate the output objects to the input ones and record
    # the relationship.
    #
    children_per_parent, parents_of_children = objects.relate_children(output_objects)
    measurements.add_measurement(
        self.objects_name.value,
        FF_CHILDREN_COUNT % self.output_objects_name.value,
        children_per_parent
    )
    measurements.add_measurement(
        self.output_objects_name.value,
        FF_PARENT % self.objects_name.value,
        parents_of_children
    )
    if self.wants_outlines:
        outlines = cellprofiler.cpmath.outline.outline(output_labels)
        outline_image = cpi.Image(outlines.astype(bool))
        workspace.image_set.add(self.outlines_name.value, outline_image)
    if workspace.frame is not None:
        workspace.display_data.orig_labels = objects.segmented
        workspace.display_data.output_labels = output_objects.segmented
def run(self, workspace):
    """Expand or shrink the input objects and save the result.

    Variant that renders its own two-panel figure when a GUI frame is
    available.
    """
    input_objects = workspace.object_set.get_objects(
        self.object_name.value)
    output_objects = cpo.Objects()
    output_objects.segmented = self.do_labels(input_objects.segmented)
    # Auxiliary planes skipped for expand/divide operations.
    if (input_objects.has_small_removed_segmented and
            self.operation not in (O_EXPAND, O_EXPAND_INF, O_DIVIDE)):
        output_objects.small_removed_segmented = \
            self.do_labels(input_objects.small_removed_segmented)
    if (input_objects.has_unedited_segmented and
            self.operation not in (O_EXPAND, O_EXPAND_INF, O_DIVIDE)):
        output_objects.unedited_segmented = \
            self.do_labels(input_objects.unedited_segmented)
    workspace.object_set.add_objects(output_objects,
                                     self.output_object_name.value)
    # Count taken as the maximum label value.
    add_object_count_measurements(workspace.measurements,
                                  self.output_object_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(workspace.measurements,
                                     self.output_object_name.value,
                                     output_objects.segmented)
    if self.wants_outlines.value:
        outline_image = cpi.Image(outline(output_objects.segmented) > 0,
                                  parent_image=input_objects.parent_image)
        workspace.image_set.add(self.outlines_name.value, outline_image)
    if workspace.frame is not None:
        # Side-by-side input/output label displays sharing axes.
        figure = workspace.create_or_find_figure(
            title="ExpandOrShrinkObjects, image cycle #%d" %
            (workspace.measurements.image_set_number), subplots=(2, 1))
        figure.subplot_imshow_labels(0, 0, input_objects.segmented,
                                     self.object_name.value)
        figure.subplot_imshow_labels(1, 0, output_objects.segmented,
                                     self.output_object_name.value,
                                     sharex=figure.subplot(0, 0),
                                     sharey=figure.subplot(0, 0))
def run(self, workspace):
    """Expand or shrink the input objects and save the result.

    Simpler variant: auxiliary label planes are always transformed
    (no operation-based exclusion) and results go to display data.
    """
    input_objects = workspace.object_set.get_objects(
        self.object_name.value)
    output_objects = cpo.Objects()
    output_objects.segmented = self.do_labels(input_objects.segmented)
    if (input_objects.has_small_removed_segmented):
        output_objects.small_removed_segmented = \
            self.do_labels(input_objects.small_removed_segmented)
    if (input_objects.has_unedited_segmented):
        output_objects.unedited_segmented = \
            self.do_labels(input_objects.unedited_segmented)
    workspace.object_set.add_objects(output_objects,
                                     self.output_object_name.value)
    # Count taken as the maximum label value.
    add_object_count_measurements(workspace.measurements,
                                  self.output_object_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(workspace.measurements,
                                     self.output_object_name.value,
                                     output_objects.segmented)
    if self.show_window:
        workspace.display_data.input_objects_segmented = input_objects.segmented
        workspace.display_data.output_objects_segmented = output_objects.segmented
def run(self, workspace):
    """Split or unify the input objects and save the relabeled result.

    OPTION_SPLIT relabels 8-connected foreground components; otherwise
    objects are unified by distance or by shared parent. Records the
    objects, count/location measurements, parent/child relationships,
    optional outlines and display data.
    """
    objects = workspace.object_set.get_objects(self.objects_name.value)
    assert isinstance(objects, cpo.Objects)
    labels = objects.segmented
    if self.relabel_option == OPTION_SPLIT:
        # 8-connectivity: np.ones((3, 3)) structuring element.
        output_labels, count = scind.label(labels > 0, np.ones((3, 3), bool))
    else:
        if self.unify_option == UNIFY_DISTANCE:
            mask = labels > 0
            if self.distance_threshold.value > 0:
                #
                # Take the distance transform of the reverse of the mask
                # and figure out what points are less than 1/2 of the
                # distance from an object.
                #
                d = scind.distance_transform_edt(~mask)
                mask = d < self.distance_threshold.value / 2 + 1
            output_labels, count = scind.label(mask, np.ones((3, 3), bool))
            # Restrict labels to the original foreground.
            output_labels[labels == 0] = 0
            if self.wants_image:
                output_labels = self.filter_using_image(workspace, mask)
        elif self.unify_option == UNIFY_PARENT:
            # Objects sharing a parent take the parent's label.
            parent_objects = workspace.object_set.get_objects(
                self.parent_object.value)
            output_labels = parent_objects.segmented.copy()
            output_labels[labels == 0] = 0
    output_objects = cpo.Objects()
    output_objects.segmented = output_labels
    # Propagate auxiliary label planes, re-mapped to the new labeling.
    if objects.has_small_removed_segmented:
        output_objects.small_removed_segmented = \
            copy_labels(objects.small_removed_segmented, output_labels)
    if objects.has_unedited_segmented:
        output_objects.unedited_segmented = \
            copy_labels(objects.unedited_segmented, output_labels)
    output_objects.parent_image = objects.parent_image
    workspace.object_set.add_objects(output_objects,
                                     self.output_objects_name.value)
    measurements = workspace.measurements
    add_object_count_measurements(measurements,
                                  self.output_objects_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(measurements,
                                     self.output_objects_name.value,
                                     output_objects.segmented)
    #
    # Relate the output objects to the input ones and record
    # the relationship.
    #
    children_per_parent, parents_of_children = \
        objects.relate_children(output_objects)
    measurements.add_measurement(
        self.objects_name.value,
        FF_CHILDREN_COUNT % self.output_objects_name.value,
        children_per_parent)
    measurements.add_measurement(self.output_objects_name.value,
                                 FF_PARENT % self.objects_name.value,
                                 parents_of_children)
    if self.wants_outlines:
        outlines = cellprofiler.cpmath.outline.outline(output_labels)
        outline_image = cpi.Image(outlines.astype(bool))
        workspace.image_set.add(self.outlines_name.value, outline_image)
    if self.show_window:
        workspace.display_data.orig_labels = objects.segmented
        workspace.display_data.output_labels = output_objects.segmented
def run(self, workspace):
    """Run the module on the current data set

    workspace - has the current image set, object set, measurements
        and the parent frame for the application if the module is
        allowed to display. If the module should not display,
        workspace.frame is None.

    Computes tertiary objects as the secondary objects minus the
    (optionally shrunk) primary objects, then records the objects,
    their parent/child relationships to both primaries and
    secondaries, and count/location measurements.
    """
    #
    # The object set holds "objects". Each of these is a container
    # for holding up to three kinds of image labels.
    #
    object_set = workspace.object_set
    #
    # Get the primary objects (the centers to be removed).
    # Get the string value out of primary_object_name.
    #
    primary_objects = object_set.get_objects(
        self.primary_objects_name.value)
    #
    # Get the cleaned-up labels image
    #
    primary_labels = primary_objects.segmented
    #
    # Do the same with the secondary object
    secondary_objects = object_set.get_objects(
        self.secondary_objects_name.value)
    secondary_labels = secondary_objects.segmented
    #
    # If one of the two label images is smaller than the other, we
    # try to find the cropping mask and we apply that mask to the larger
    #
    try:
        if any([
            p_size < s_size for p_size, s_size in zip(
                primary_labels.shape, secondary_labels.shape)
        ]):
            #
            # Look for a cropping mask associated with the primary_labels
            # and apply that mask to resize the secondary labels
            #
            secondary_labels = primary_objects.crop_image_similarly(
                secondary_labels)
            tertiary_image = primary_objects.parent_image
        elif any([
            p_size > s_size for p_size, s_size in zip(
                primary_labels.shape, secondary_labels.shape)
        ]):
            primary_labels = secondary_objects.crop_image_similarly(
                primary_labels)
            tertiary_image = secondary_objects.parent_image
        elif secondary_objects.parent_image is not None:
            tertiary_image = secondary_objects.parent_image
        else:
            tertiary_image = primary_objects.parent_image
    except ValueError:
        # No suitable cropping - resize all to fit the secondary
        # labels which are the most critical.
        #
        primary_labels, _ = cpo.size_similarly(secondary_labels, primary_labels)
        if secondary_objects.parent_image is not None:
            tertiary_image = secondary_objects.parent_image
        else:
            tertiary_image = primary_objects.parent_image
            if tertiary_image is not None:
                tertiary_image, _ = cpo.size_similarly(
                    secondary_labels, tertiary_image)
    #
    # Find the outlines of the primary image and use this to shrink the
    # primary image by one. This guarantees that there is something left
    # of the secondary image after subtraction
    #
    primary_outline = outline(primary_labels)
    tertiary_labels = secondary_labels.copy()
    if self.shrink_primary:
        primary_mask = np.logical_or(primary_labels == 0,
                                     primary_outline)
    else:
        primary_mask = primary_labels == 0
    # Elementwise comparison: zero out every pixel where the primary
    # mask is False (i.e. where a primary object remains).
    tertiary_labels[primary_mask == False] = 0
    #
    # Get the outlines of the tertiary image
    #
    tertiary_outlines = outline(tertiary_labels) != 0
    #
    # Make the tertiary objects container
    #
    tertiary_objects = cpo.Objects()
    tertiary_objects.segmented = tertiary_labels
    tertiary_objects.parent_image = tertiary_image
    #
    # Relate tertiary objects to their parents & record
    #
    child_count_of_secondary, secondary_parents = \
        secondary_objects.relate_children(tertiary_objects)
    if self.shrink_primary:
        child_count_of_primary, primary_parents = \
            primary_objects.relate_children(tertiary_objects)
    else:
        # Primary and tertiary don't overlap.
        # Establish overlap between primary and secondary and commute
        _, secondary_of_primary = \
            secondary_objects.relate_children(primary_objects)
        mask = secondary_of_primary != 0
        child_count_of_primary = np.zeros(mask.shape, int)
        child_count_of_primary[mask] = child_count_of_secondary[
            secondary_of_primary[mask] - 1]
        primary_parents = np.zeros(secondary_parents.shape,
                                   secondary_parents.dtype)
        # Invert the secondary->primary mapping so each tertiary child
        # can inherit its primary parent via its secondary parent.
        primary_of_secondary = np.zeros(secondary_objects.count + 1, int)
        primary_of_secondary[secondary_of_primary] = \
            np.arange(1, len(secondary_of_primary) + 1)
        primary_of_secondary[0] = 0
        primary_parents = primary_of_secondary[secondary_parents]
    #
    # Write out the objects
    #
    workspace.object_set.add_objects(tertiary_objects,
                                     self.subregion_objects_name.value)
    #
    # Write out the measurements
    #
    m = workspace.measurements
    #
    # The parent/child associations
    #
    for parent_objects_name, parents_of, child_count, relationship in (
            (self.primary_objects_name, primary_parents,
             child_count_of_primary, R_REMOVED),
            (self.secondary_objects_name, secondary_parents,
             child_count_of_secondary, R_PARENT)):
        m.add_measurement(
            self.subregion_objects_name.value,
            cellprofiler.measurement.FF_PARENT % parent_objects_name.value,
            parents_of)
        m.add_measurement(
            parent_objects_name.value,
            cellprofiler.measurement.FF_CHILDREN_COUNT %
            self.subregion_objects_name.value,
            child_count)
        # Per-object relationship rows, excluding orphans (parent 0).
        mask = parents_of != 0
        image_number = np.ones(np.sum(mask), int) * m.image_set_number
        child_object_number = np.argwhere(mask).flatten() + 1
        parent_object_number = parents_of[mask]
        m.add_relate_measurement(self.module_num, relationship,
                                 parent_objects_name.value,
                                 self.subregion_objects_name.value,
                                 image_number, parent_object_number,
                                 image_number, child_object_number)
    object_count = tertiary_objects.count
    #
    # The object count
    #
    cpmi.add_object_count_measurements(workspace.measurements,
                                       self.subregion_objects_name.value,
                                       object_count)
    #
    # The object locations
    #
    cpmi.add_object_location_measurements(
        workspace.measurements,
        self.subregion_objects_name.value,
        tertiary_labels)
    if self.show_window:
        workspace.display_data.primary_labels = primary_labels
        workspace.display_data.secondary_labels = secondary_labels
        workspace.display_data.tertiary_labels = tertiary_labels
        workspace.display_data.tertiary_outlines = tertiary_outlines
def run(self, workspace):
    """Split or unify the input objects and save the relabeled result.

    OPTION_SPLIT relabels 8-connected foreground components; otherwise
    objects are unified by distance or by a previously-measured parent
    relationship (optionally filling each group's convex hull).
    Records objects, measurements, parent/child relationships,
    optional outlines and display data.
    """
    objects_name = self.objects_name.value
    objects = workspace.object_set.get_objects(objects_name)
    assert isinstance(objects, cpo.Objects)
    labels = objects.segmented
    if self.relabel_option == OPTION_SPLIT:
        # 8-connectivity: np.ones((3,3)) structuring element.
        output_labels, count = scind.label(labels > 0, np.ones((3,3),bool))
    else:
        if self.unify_option == UNIFY_DISTANCE:
            mask = labels > 0
            if self.distance_threshold.value > 0:
                #
                # Take the distance transform of the reverse of the mask
                # and figure out what points are less than 1/2 of the
                # distance from an object.
                #
                d = scind.distance_transform_edt(~mask)
                mask = d < self.distance_threshold.value/2+1
            output_labels, count = scind.label(mask, np.ones((3,3), bool))
            # Restrict labels to the original foreground.
            output_labels[labels == 0] = 0
            if self.wants_image:
                output_labels = self.filter_using_image(workspace, mask)
        elif self.unify_option == UNIFY_PARENT:
            # Look up each object's parent from a prior measurement
            # (Parent_<name> column) and relabel children by parent.
            parents_name = self.parent_object.value
            parents_of = workspace.measurements[
                objects_name, "_".join((C_PARENT, parents_name))]
            output_labels = labels.copy().astype(np.uint32)
            output_labels[labels > 0] = parents_of[labels[labels > 0]-1]
            if self.unification_method == UM_CONVEX_HULL:
                # Replace each unified group by its filled convex hull.
                ch_pts, n_pts = morph.convex_hull(output_labels)
                ijv = morph.fill_convex_hulls(ch_pts, n_pts)
                output_labels[ijv[:, 0], ijv[:, 1]] = ijv[:, 2]
    output_objects = cpo.Objects()
    output_objects.segmented = output_labels
    # Propagate auxiliary label planes, re-mapped to the new labeling.
    if objects.has_small_removed_segmented:
        output_objects.small_removed_segmented = \
            copy_labels(objects.small_removed_segmented, output_labels)
    if objects.has_unedited_segmented:
        output_objects.unedited_segmented = \
            copy_labels(objects.unedited_segmented, output_labels)
    output_objects.parent_image = objects.parent_image
    workspace.object_set.add_objects(output_objects,
                                     self.output_objects_name.value)
    measurements = workspace.measurements
    add_object_count_measurements(measurements,
                                  self.output_objects_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(measurements,
                                     self.output_objects_name.value,
                                     output_objects.segmented)
    #
    # Relate the output objects to the input ones and record
    # the relationship.
    #
    children_per_parent, parents_of_children = \
        objects.relate_children(output_objects)
    measurements.add_measurement(self.objects_name.value,
                                 FF_CHILDREN_COUNT %
                                 self.output_objects_name.value,
                                 children_per_parent)
    measurements.add_measurement(self.output_objects_name.value,
                                 FF_PARENT%self.objects_name.value,
                                 parents_of_children)
    if self.wants_outlines:
        outlines = centrosome.outline.outline(output_labels)
        outline_image = cpi.Image(outlines.astype(bool))
        workspace.image_set.add(self.outlines_name.value, outline_image)
    if self.show_window:
        workspace.display_data.orig_labels = objects.segmented
        workspace.display_data.output_labels = output_objects.segmented
        if self.unify_option == UNIFY_PARENT:
            workspace.display_data.parent_labels = \
                workspace.object_set.get_objects(self.parent_object.value).segmented
def run(self, workspace):
    """Run the module

    workspace    - The workspace contains
        pipeline     - instance of cpp for this run
        image_set    - the images in the image set being processed
        object_set   - the objects (labeled masks) in this image set
        measurements - the measurements for this run
        frame        - the parent frame to whatever frame is created.
                       None means don't draw.

    Sends the original labels (plus an optional normalized guide
    image) to the UI via an interaction request, then turns the
    edited labels the UI returns into a new object set with
    parent/child, count and location measurements.
    """
    orig_objects_name = self.object_name.value
    filtered_objects_name = self.filtered_objects.value
    orig_objects = workspace.object_set.get_objects(orig_objects_name)
    assert isinstance(orig_objects, cpo.Objects)
    # get_labels() yields (labels, indices) pairs; keep the label planes.
    orig_labels = [l for l, c in orig_objects.get_labels()]
    if self.wants_image_display:
        guide_image = workspace.image_set.get_image(self.image_name.value)
        guide_image = guide_image.pixel_data
        # Normalize to [0, 1] unless the image is perfectly flat
        # (which would divide by zero).
        if np.any(guide_image != np.min(guide_image)):
            guide_image = (guide_image - np.min(guide_image)) / (
                np.max(guide_image) - np.min(guide_image))
    else:
        guide_image = None
    filtered_labels = workspace.interaction_request(
        self, orig_labels, guide_image,
        workspace.measurements.image_set_number)
    if filtered_labels is None:
        # Ask whoever is listening to stop doing stuff
        workspace.cancel_request()
        # Have to soldier on until the cancel takes effect...
        filtered_labels = orig_labels
    #
    # Renumber objects consecutively if asked to do so
    #
    unique_labels = np.unique(np.array(filtered_labels))
    unique_labels = unique_labels[unique_labels != 0]
    object_count = len(unique_labels)
    if self.renumber_choice == R_RENUMBER:
        # Map old label -> 1..object_count; guard against the empty case
        # where np.max would fail.
        mapping = np.zeros(
            1 if len(unique_labels) == 0 else np.max(unique_labels) + 1,
            int)
        mapping[unique_labels] = np.arange(1, object_count + 1)
        filtered_labels = [mapping[l] for l in filtered_labels]
    #
    # Make the objects out of the labels
    #
    filtered_objects = cpo.Objects()
    i, j = np.mgrid[0:filtered_labels[0].shape[0],
                    0:filtered_labels[0].shape[1]]
    # Collect one (i, j, label) chunk per label plane and stack once --
    # repeated np.vstack in the loop was quadratic in total pixels.
    chunks = [np.zeros((0, 3), filtered_labels[0].dtype)]
    for l in filtered_labels:
        fg = l != 0
        chunks.append(np.column_stack((i[fg], j[fg], l[fg])))
    ijv = np.vstack(chunks)
    filtered_objects.set_ijv(ijv, orig_labels[0].shape)
    # BUGFIX: has_unedited_segmented is a property (accessed without
    # parentheses everywhere else in this file); calling it raised
    # TypeError ('bool' object is not callable).
    if orig_objects.has_unedited_segmented:
        filtered_objects.unedited_segmented = orig_objects.unedited_segmented
    if orig_objects.parent_image is not None:
        filtered_objects.parent_image = orig_objects.parent_image
    workspace.object_set.add_objects(filtered_objects,
                                     filtered_objects_name)
    #
    # Add parent/child & other measurements
    #
    m = workspace.measurements
    child_count, parents = orig_objects.relate_children(filtered_objects)
    m.add_measurement(
        filtered_objects_name,
        cellprofiler.measurement.FF_PARENT % orig_objects_name,
        parents,
    )
    m.add_measurement(
        orig_objects_name,
        cellprofiler.measurement.FF_CHILDREN_COUNT % filtered_objects_name,
        child_count,
    )
    #
    # The object count
    #
    I.add_object_count_measurements(m, filtered_objects_name, object_count)
    #
    # The object locations
    #
    I.add_object_location_measurements_ijv(m, filtered_objects_name, ijv)
    workspace.display_data.orig_ijv = orig_objects.ijv
    workspace.display_data.filtered_ijv = filtered_objects.ijv
    workspace.display_data.shape = orig_labels[0].shape
def run(self, workspace):
    """Run the module on the current data set

    workspace - has the current image set, object set, measurements
        and the parent frame for the application if the module is
        allowed to display. If the module should not display,
        workspace.frame is None.

    Computes tertiary objects as the secondary objects minus the
    (optionally shrunk) primary objects and records objects,
    relationships and measurements.
    """
    #
    # The object set holds "objects". Each of these is a container
    # for holding up to three kinds of image labels.
    #
    object_set = workspace.object_set
    #
    # Get the primary objects (the centers to be removed).
    # Get the string value out of primary_object_name.
    #
    primary_objects = object_set.get_objects(self.primary_objects_name.value)
    #
    # Get the cleaned-up labels image
    #
    primary_labels = primary_objects.segmented
    #
    # Do the same with the secondary object
    secondary_objects = object_set.get_objects(self.secondary_objects_name.value)
    secondary_labels = secondary_objects.segmented
    #
    # If one of the two label images is smaller than the other, we
    # try to find the cropping mask and we apply that mask to the larger
    #
    try:
        if any([p_size < s_size
                for p_size, s_size in zip(primary_labels.shape,
                                          secondary_labels.shape)]):
            #
            # Look for a cropping mask associated with the primary_labels
            # and apply that mask to resize the secondary labels
            #
            secondary_labels = primary_objects.crop_image_similarly(secondary_labels)
            tertiary_image = primary_objects.parent_image
        elif any([p_size > s_size
                  for p_size, s_size in zip(primary_labels.shape,
                                            secondary_labels.shape)]):
            primary_labels = secondary_objects.crop_image_similarly(primary_labels)
            tertiary_image = secondary_objects.parent_image
        elif secondary_objects.parent_image is not None:
            tertiary_image = secondary_objects.parent_image
        else:
            tertiary_image = primary_objects.parent_image
    except ValueError:
        # No suitable cropping - resize all to fit the secondary
        # labels which are the most critical.
        #
        primary_labels, _ = cpo.size_similarly(secondary_labels, primary_labels)
        if secondary_objects.parent_image is not None:
            tertiary_image = secondary_objects.parent_image
        else:
            tertiary_image = primary_objects.parent_image
            if tertiary_image is not None:
                tertiary_image, _ = cpo.size_similarly(secondary_labels, tertiary_image)
    #
    # Find the outlines of the primary image and use this to shrink the
    # primary image by one. This guarantees that there is something left
    # of the secondary image after subtraction
    #
    primary_outline = outline(primary_labels)
    tertiary_labels = secondary_labels.copy()
    if self.shrink_primary:
        primary_mask = np.logical_or(primary_labels == 0, primary_outline)
    else:
        primary_mask = primary_labels == 0
    # Elementwise comparison: zero out every pixel where the primary
    # mask is False (i.e. where a primary object remains).
    tertiary_labels[primary_mask == False] = 0
    #
    # Get the outlines of the tertiary image
    #
    tertiary_outlines = outline(tertiary_labels) != 0
    #
    # Make the tertiary objects container
    #
    tertiary_objects = cpo.Objects()
    tertiary_objects.segmented = tertiary_labels
    tertiary_objects.parent_image = tertiary_image
    #
    # Relate tertiary objects to their parents & record
    #
    child_count_of_secondary, secondary_parents = \
        secondary_objects.relate_children(tertiary_objects)
    if self.shrink_primary:
        child_count_of_primary, primary_parents = \
            primary_objects.relate_children(tertiary_objects)
    else:
        # Primary and tertiary don't overlap.
        # Establish overlap between primary and secondary and commute
        _, secondary_of_primary = \
            secondary_objects.relate_children(primary_objects)
        mask = secondary_of_primary != 0
        child_count_of_primary = np.zeros(mask.shape, int)
        child_count_of_primary[mask] = child_count_of_secondary[
            secondary_of_primary[mask] - 1]
        primary_parents = np.zeros(secondary_parents.shape,
                                   secondary_parents.dtype)
        # Invert the secondary->primary mapping so each tertiary child
        # can inherit its primary parent via its secondary parent.
        primary_of_secondary = np.zeros(secondary_objects.count + 1, int)
        primary_of_secondary[secondary_of_primary] = \
            np.arange(1, len(secondary_of_primary) + 1)
        primary_of_secondary[0] = 0
        primary_parents = primary_of_secondary[secondary_parents]
    #
    # Write out the objects
    #
    workspace.object_set.add_objects(tertiary_objects,
                                     self.subregion_objects_name.value)
    #
    # Write out the measurements
    #
    m = workspace.measurements
    #
    # The parent/child associations
    #
    for parent_objects_name, parents_of, child_count, relationship in (
            (self.primary_objects_name, primary_parents,
             child_count_of_primary, R_REMOVED),
            (self.secondary_objects_name, secondary_parents,
             child_count_of_secondary, R_PARENT)):
        m.add_measurement(self.subregion_objects_name.value,
                          cellprofiler.measurement.FF_PARENT %
                          parent_objects_name.value,
                          parents_of)
        m.add_measurement(parent_objects_name.value,
                          cellprofiler.measurement.FF_CHILDREN_COUNT %
                          self.subregion_objects_name.value,
                          child_count)
        # Per-object relationship rows, excluding orphans (parent 0).
        mask = parents_of != 0
        image_number = np.ones(np.sum(mask), int) * m.image_set_number
        child_object_number = np.argwhere(mask).flatten() + 1
        parent_object_number = parents_of[mask]
        m.add_relate_measurement(
            self.module_num, relationship,
            parent_objects_name.value,
            self.subregion_objects_name.value,
            image_number, parent_object_number,
            image_number, child_object_number)
    object_count = tertiary_objects.count
    #
    # The object count
    #
    cpmi.add_object_count_measurements(workspace.measurements,
                                       self.subregion_objects_name.value,
                                       object_count)
    #
    # The object locations
    #
    cpmi.add_object_location_measurements(workspace.measurements,
                                          self.subregion_objects_name.value,
                                          tertiary_labels)
    if self.show_window:
        workspace.display_data.primary_labels = primary_labels
        workspace.display_data.secondary_labels = secondary_labels
        workspace.display_data.tertiary_labels = tertiary_labels
        workspace.display_data.tertiary_outlines = tertiary_outlines
def run(self, workspace):
    """Load each configured file for this image set and record measurements.

    For every file setting, the previously-recorded path/file measurements
    are looked up and the file is loaded either as an image (recording MD5,
    scaling, height and width measurements and registering the provider) or
    as an objects segmentation (recording object count/location measurements
    and, optionally, an outline image).

    workspace - provides the measurements, image set, object set and
                display data for this run.
    """
    statistics = []
    m = workspace.measurements
    assert isinstance(m, cpmeas.Measurements)
    #
    # Hack: if LoadSingleImage is first, no paths are populated
    #
    if self.file_wants_images(self.file_settings[0]):
        m_path = "_".join(
            (C_PATH_NAME, self.file_settings[0].image_name.value))
    else:
        m_path = "_".join((C_OBJECTS_PATH_NAME,
                           self.file_settings[0].objects_name.value))
    if m.get_current_image_measurement(m_path) is None:
        self.prepare_run(workspace)

    image_set = workspace.image_set
    for file_setting in self.file_settings:
        wants_images = self.file_wants_images(file_setting)
        image_name = (file_setting.image_name.value if wants_images
                      else file_setting.objects_name.value)
        # Build all per-image measurement feature names in one pass.
        m_path, m_file, m_md5_digest, m_scaling, m_height, m_width = [
            "_".join((c, image_name)) for c in (
                C_PATH_NAME if wants_images else C_OBJECTS_PATH_NAME,
                C_FILE_NAME if wants_images else C_OBJECTS_FILE_NAME,
                C_MD5_DIGEST,
                C_SCALING,
                C_HEIGHT,
                C_WIDTH,
            )
        ]
        pathname = m.get_current_image_measurement(m_path)
        filename = m.get_current_image_measurement(m_file)
        # Objects are never rescaled; only images honor the rescale setting.
        rescale = wants_images and file_setting.rescale.value

        provider = LoadImagesImageProvider(image_name, pathname, filename,
                                           rescale)
        image = provider.provide_image(image_set)
        pixel_data = image.pixel_data
        if wants_images:
            md5 = provider.get_md5_hash(m)
            # Use the feature names computed above instead of re-joining
            # the same (feature, image_name) pairs a second time.
            m.add_image_measurement(m_md5_digest, md5)
            m.add_image_measurement(m_scaling, image.scale)
            m.add_image_measurement(m_height, int(pixel_data.shape[0]))
            m.add_image_measurement(m_width, int(pixel_data.shape[1]))
            image_set.providers.append(provider)
        else:
            #
            # Turn image into objects
            #
            labels = convert_image_to_objects(pixel_data)
            objects = cpo.Objects()
            objects.segmented = labels
            object_set = workspace.object_set
            assert isinstance(object_set, cpo.ObjectSet)
            object_set.add_objects(objects, image_name)
            add_object_count_measurements(m, image_name, objects.count)
            add_object_location_measurements(m, image_name, labels)
            #
            # Add outlines if appropriate
            #
            if file_setting.wants_outlines:
                outlines = centrosome.outline.outline(labels)
                outline_image = cpi.Image(outlines.astype(bool))
                workspace.image_set.add(file_setting.outlines_name.value,
                                        outline_image)
        statistics += [(image_name, filename)]
    workspace.display_data.col_labels = ("Image name", "File")
    workspace.display_data.statistics = statistics
def run(self, workspace):
    '''Filter objects for this image set, display results'''
    src_objects = workspace.get_objects(self.object_name.value)
    # Pick the surviving object indices according to the configured mode.
    # NOTE: `indexes` holds the ORIGINAL label numbers of objects to keep.
    if self.rules_or_measurement == ROM_RULES:
        indexes = self.keep_by_rules(workspace, src_objects)
    elif self.filter_choice in (FI_MINIMAL, FI_MAXIMAL):
        indexes = self.keep_one(workspace, src_objects)
    elif self.filter_choice in (FI_MINIMAL_PER_OBJECT,
                                FI_MAXIMAL_PER_OBJECT):
        indexes = self.keep_per_object(workspace, src_objects)
    elif self.filter_choice == FI_LIMITS:
        indexes = self.keep_within_limits(workspace, src_objects)
    else:
        raise ValueError("Unknown filter choice: %s" %
                         self.filter_choice.value)
    #
    # Create an array that maps label indexes to their new values
    # All labels to be deleted have a value in this array of zero
    #
    new_object_count = len(indexes)
    max_label = np.max(src_objects.segmented)
    label_indexes = np.zeros((max_label + 1, ), int)
    label_indexes[indexes] = np.arange(1, new_object_count + 1)
    #
    # Loop over both the primary and additional objects
    #
    object_list = ([(self.object_name.value, self.target_name.value,
                     self.wants_outlines.value, self.outlines_name.value)] +
                   [(x.object_name.value, x.target_name.value,
                     x.wants_outlines.value, x.outlines_name.value)
                    for x in self.additional_objects])
    m = workspace.measurements
    for src_name, target_name, wants_outlines, outlines_name in object_list:
        src_objects = workspace.get_objects(src_name)
        target_labels = src_objects.segmented.copy()
        #
        # Reindex the labels of the old source image
        #
        # Labels above the primary set's max cannot appear in
        # label_indexes; zero them before the lookup.
        target_labels[target_labels > max_label] = 0
        target_labels = label_indexes[target_labels]
        #
        # Make a new set of objects - retain the old set's unedited
        # segmentation for the new and generally try to copy stuff
        # from the old to the new.
        #
        target_objects = cpo.Objects()
        target_objects.segmented = target_labels
        target_objects.unedited_segmented = src_objects.unedited_segmented
        if src_objects.has_parent_image:
            target_objects.parent_image = src_objects.parent_image
        workspace.object_set.add_objects(target_objects, target_name)
        #
        # Add measurements for the new objects
        add_object_count_measurements(m, target_name, new_object_count)
        add_object_location_measurements(m, target_name, target_labels)
        #
        # Relate the old numbering to the new numbering
        #
        m.add_measurement(target_name, FF_PARENT % (src_name),
                          np.array(indexes))
        #
        # Count the children (0 / 1)
        #
        # Each kept source object has exactly one child in the target set.
        child_count = (label_indexes[1:] > 0).astype(int)
        m.add_measurement(src_name, FF_CHILDREN_COUNT % target_name,
                          child_count)
        #
        # Add an outline if asked to do so
        #
        if wants_outlines:
            outline_image = cpi.Image(
                outline(target_labels) > 0,
                parent_image=target_objects.parent_image)
            workspace.image_set.add(outlines_name, outline_image)
def run(self, workspace): """Run the module workspace - The workspace contains pipeline - instance of cpp for this run image_set - the images in the image set being processed object_set - the objects (labeled masks) in this image set measurements - the measurements for this run frame - the parent frame to whatever frame is created. None means don't draw. """ orig_objects_name = self.object_name.value filtered_objects_name = self.filtered_objects.value orig_objects = workspace.object_set.get_objects(orig_objects_name) assert isinstance(orig_objects, cpo.Objects) orig_labels = [l for l, c in orig_objects.get_labels()] if self.wants_image_display: guide_image = workspace.image_set.get_image(self.image_name.value) guide_image = guide_image.pixel_data if np.any(guide_image != np.min(guide_image)): guide_image = (guide_image - np.min(guide_image)) / (np.max(guide_image) - np.min(guide_image)) else: guide_image = None filtered_labels = workspace.interaction_request( self, orig_labels, guide_image, workspace.measurements.image_set_number) if filtered_labels is None: # Ask whoever is listening to stop doing stuff workspace.cancel_request() # Have to soldier on until the cancel takes effect... 
filtered_labels = orig_labels # # Renumber objects consecutively if asked to do so # unique_labels = np.unique(np.array(filtered_labels)) unique_labels = unique_labels[unique_labels != 0] object_count = len(unique_labels) if self.renumber_choice == R_RENUMBER: mapping = np.zeros(1 if len(unique_labels) == 0 else np.max(unique_labels) + 1, int) mapping[unique_labels] = np.arange(1, object_count + 1) filtered_labels = [mapping[l] for l in filtered_labels] # # Make the objects out of the labels # filtered_objects = cpo.Objects() i, j = np.mgrid[0:filtered_labels[0].shape[0], 0:filtered_labels[0].shape[1]] ijv = np.zeros((0, 3), filtered_labels[0].dtype) for l in filtered_labels: ijv = np.vstack((ijv, np.column_stack((i[l != 0], j[l != 0], l[l != 0])))) filtered_objects.set_ijv(ijv, orig_labels[0].shape) if orig_objects.has_unedited_segmented(): filtered_objects.unedited_segmented = orig_objects.unedited_segmented if orig_objects.parent_image is not None: filtered_objects.parent_image = orig_objects.parent_image workspace.object_set.add_objects(filtered_objects, filtered_objects_name) # # Add parent/child & other measurements # m = workspace.measurements child_count, parents = orig_objects.relate_children(filtered_objects) m.add_measurement(filtered_objects_name, cellprofiler.measurement.FF_PARENT % orig_objects_name, parents) m.add_measurement(orig_objects_name, cellprofiler.measurement.FF_CHILDREN_COUNT % filtered_objects_name, child_count) # # The object count # I.add_object_count_measurements(m, filtered_objects_name, object_count) # # The object locations # I.add_object_location_measurements_ijv(m, filtered_objects_name, ijv) workspace.display_data.orig_ijv = orig_objects.ijv workspace.display_data.filtered_ijv = filtered_objects.ijv workspace.display_data.shape = orig_labels[0].shape
def run(self, workspace):
    """Load each configured file for this image set and record measurements.

    For every file setting, the previously-recorded path/file measurements
    are looked up and the file is loaded either as an image (recording MD5,
    scaling, height and width measurements and registering the provider) or
    as an objects segmentation (recording object count/location measurements
    and, optionally, an outline image).

    workspace - provides the measurements, image set, object set and
                display data for this run.
    """
    statistics = []
    m = workspace.measurements
    assert isinstance(m, cpmeas.Measurements)
    #
    # Hack: if LoadSingleImage is first, no paths are populated
    #
    if self.file_wants_images(self.file_settings[0]):
        m_path = "_".join((C_PATH_NAME,
                           self.file_settings[0].image_name.value))
    else:
        m_path = "_".join((C_OBJECTS_PATH_NAME,
                           self.file_settings[0].objects_name.value))
    if m.get_current_image_measurement(m_path) is None:
        self.prepare_run(workspace)

    image_set = workspace.image_set
    for file_setting in self.file_settings:
        wants_images = self.file_wants_images(file_setting)
        image_name = file_setting.image_name.value if wants_images else \
            file_setting.objects_name.value
        # Build all per-image measurement feature names in one pass.
        m_path, m_file, m_md5_digest, m_scaling, m_height, m_width = [
            "_".join((c, image_name)) for c in (
                C_PATH_NAME if wants_images else C_OBJECTS_PATH_NAME,
                C_FILE_NAME if wants_images else C_OBJECTS_FILE_NAME,
                C_MD5_DIGEST, C_SCALING, C_HEIGHT, C_WIDTH)]
        pathname = m.get_current_image_measurement(m_path)
        filename = m.get_current_image_measurement(m_file)
        # Objects are never rescaled; only images honor the rescale setting.
        rescale = (wants_images and file_setting.rescale.value)

        provider = LoadImagesImageProvider(
            image_name, pathname, filename, rescale)
        image = provider.provide_image(image_set)
        pixel_data = image.pixel_data
        if wants_images:
            md5 = provider.get_md5_hash(m)
            # Use the feature names computed above instead of re-joining
            # the same (feature, image_name) pairs a second time.
            m.add_image_measurement(m_md5_digest, md5)
            m.add_image_measurement(m_scaling, image.scale)
            m.add_image_measurement(m_height, int(pixel_data.shape[0]))
            m.add_image_measurement(m_width, int(pixel_data.shape[1]))
            image_set.providers.append(provider)
        else:
            #
            # Turn image into objects
            #
            labels = convert_image_to_objects(pixel_data)
            objects = cpo.Objects()
            objects.segmented = labels
            object_set = workspace.object_set
            assert isinstance(object_set, cpo.ObjectSet)
            object_set.add_objects(objects, image_name)
            add_object_count_measurements(m, image_name, objects.count)
            add_object_location_measurements(m, image_name, labels)
            #
            # Add outlines if appropriate
            #
            if file_setting.wants_outlines:
                outlines = centrosome.outline.outline(labels)
                outline_image = cpi.Image(outlines.astype(bool))
                workspace.image_set.add(file_setting.outlines_name.value,
                                        outline_image)
        statistics += [(image_name, filename)]
    workspace.display_data.col_labels = ("Image name", "File")
    workspace.display_data.statistics = statistics
def run(self, workspace):
    '''Run the module on an image set

    Masks the input objects with either a binary image or another object
    set, removes (or truncates) objects according to the overlap choice,
    optionally renumbers the survivors, and records parent/child,
    count and location measurements for the masked objects.
    '''
    object_name = self.object_name.value
    remaining_object_name = self.remaining_objects.value
    original_objects = workspace.object_set.get_objects(object_name)

    if self.mask_choice == MC_IMAGE:
        mask = workspace.image_set.get_image(self.masking_image.value,
                                             must_be_binary=True)
        mask = mask.pixel_data
    else:
        masking_objects = workspace.object_set.get_objects(
            self.masking_objects.value)
        mask = masking_objects.segmented > 0
    if self.wants_inverted_mask:
        mask = ~mask
    #
    # Load the labels
    #
    labels = original_objects.segmented.copy()
    nobjects = np.max(labels)
    #
    # Resize the mask to cover the objects
    #
    # Pixels outside the original mask extent (m1 False) are unmasked.
    mask, m1 = cpo.size_similarly(labels, mask)
    mask[~m1] = False
    #
    # Apply the mask according to the overlap choice.
    #
    if nobjects == 0:
        pass
    elif self.overlap_choice == P_MASK:
        # Truncate objects at the mask boundary.
        labels = labels * mask
    else:
        pixel_counts = fix(scind.sum(mask, labels,
                                     np.arange(1, nobjects + 1,
                                               dtype=np.int32)))
        if self.overlap_choice == P_KEEP:
            keep = pixel_counts > 0
        else:
            total_pixels = fix(scind.sum(np.ones(labels.shape), labels,
                                         np.arange(1, nobjects + 1,
                                                   dtype=np.int32)))
            if self.overlap_choice == P_REMOVE:
                keep = pixel_counts == total_pixels
            elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                fraction = self.overlap_fraction.value
                keep = pixel_counts / total_pixels >= fraction
            else:
                # Fixed: format the choice into the message with "%"
                # instead of passing it as a second constructor argument
                # (which left the "%s" uninterpolated).
                raise NotImplementedError(
                    "Unknown overlap-handling choice: %s" %
                    self.overlap_choice.value)
        # Prepend background (label 0) so keep[labels] lines up.
        keep = np.hstack(([False], keep))
        labels[~ keep[labels]] = 0
    #
    # Renumber the labels matrix if requested
    #
    if self.retain_or_renumber == R_RENUMBER:
        unique_labels = np.unique(labels[labels != 0])
        indexer = np.zeros(nobjects + 1, int)
        indexer[unique_labels] = np.arange(1, len(unique_labels) + 1)
        labels = indexer[labels]
        parent_objects = unique_labels
    else:
        parent_objects = np.arange(1, nobjects + 1)
    #
    # Add the objects
    #
    remaining_objects = cpo.Objects()
    remaining_objects.segmented = labels
    remaining_objects.unedited_segmented = original_objects.unedited_segmented
    workspace.object_set.add_objects(remaining_objects,
                                     remaining_object_name)
    #
    # Add measurements
    #
    m = workspace.measurements
    m.add_measurement(remaining_object_name,
                      cellprofiler.measurement.FF_PARENT % object_name,
                      parent_objects)
    if np.max(original_objects.segmented) == 0:
        child_count = np.array([], int)
    else:
        child_count = fix(scind.sum(labels, original_objects.segmented,
                                    np.arange(1, nobjects + 1,
                                              dtype=np.int32)))
        child_count = (child_count > 0).astype(int)
    m.add_measurement(object_name,
                      cellprofiler.measurement.FF_CHILDREN_COUNT %
                      remaining_object_name,
                      child_count)
    if self.retain_or_renumber == R_RETAIN:
        remaining_object_count = nobjects
    else:
        remaining_object_count = len(unique_labels)
    I.add_object_count_measurements(m, remaining_object_name,
                                    remaining_object_count)
    I.add_object_location_measurements(m, remaining_object_name, labels)
    #
    # Save the input, mask and output images for display
    #
    if self.show_window:
        workspace.display_data.original_labels = original_objects.segmented
        workspace.display_data.final_labels = labels
        workspace.display_data.mask = mask
def run(self, workspace):
    '''Filter objects for this image set, display results'''
    src_objects = workspace.get_objects(self.object_name.value)
    if self.mode == MODE_RULES:
        indexes = self.keep_by_rules(workspace, src_objects)
    elif self.mode == MODE_MEASUREMENTS:
        # Fixed: use an elif-chain so an unrecognized filter choice
        # raises the ValueError below instead of leaving "indexes"
        # unbound (which produced a confusing NameError downstream).
        if self.filter_choice in (FI_MINIMAL, FI_MAXIMAL):
            indexes = self.keep_one(workspace, src_objects)
        elif self.filter_choice in (FI_MINIMAL_PER_OBJECT,
                                    FI_MAXIMAL_PER_OBJECT):
            indexes = self.keep_per_object(workspace, src_objects)
        elif self.filter_choice == FI_LIMITS:
            indexes = self.keep_within_limits(workspace, src_objects)
        else:
            raise ValueError("Unknown filter choice: %s" %
                             self.filter_choice.value)
    elif self.mode == MODE_BORDER:
        indexes = self.discard_border_objects(workspace, src_objects)
    else:
        raise ValueError("Unknown filter choice: %s" %
                         self.filter_choice.value)
    #
    # Create an array that maps label indexes to their new values
    # All labels to be deleted have a value in this array of zero
    #
    new_object_count = len(indexes)
    max_label = np.max(src_objects.segmented)
    label_indexes = np.zeros((max_label + 1,), int)
    label_indexes[indexes] = np.arange(1, new_object_count + 1)
    #
    # Loop over both the primary and additional objects
    #
    object_list = ([(self.object_name.value, self.target_name.value,
                     self.wants_outlines.value, self.outlines_name.value)] +
                   [(x.object_name.value, x.target_name.value,
                     x.wants_outlines.value, x.outlines_name.value)
                    for x in self.additional_objects])
    m = workspace.measurements
    for src_name, target_name, wants_outlines, outlines_name in object_list:
        src_objects = workspace.get_objects(src_name)
        target_labels = src_objects.segmented.copy()
        #
        # Reindex the labels of the old source image
        #
        # Labels above the primary set's max cannot appear in
        # label_indexes; zero them before the lookup.
        target_labels[target_labels > max_label] = 0
        target_labels = label_indexes[target_labels]
        #
        # Make a new set of objects - retain the old set's unedited
        # segmentation for the new and generally try to copy stuff
        # from the old to the new.
        #
        target_objects = cpo.Objects()
        target_objects.segmented = target_labels
        target_objects.unedited_segmented = src_objects.unedited_segmented
        if src_objects.has_parent_image:
            target_objects.parent_image = src_objects.parent_image
        workspace.object_set.add_objects(target_objects, target_name)
        #
        # Add measurements for the new objects
        add_object_count_measurements(m, target_name, new_object_count)
        add_object_location_measurements(m, target_name, target_labels)
        #
        # Relate the old numbering to the new numbering
        #
        m.add_measurement(target_name, FF_PARENT % (src_name),
                          np.array(indexes))
        #
        # Count the children (0 / 1)
        #
        child_count = (label_indexes[1:] > 0).astype(int)
        m.add_measurement(src_name, FF_CHILDREN_COUNT % target_name,
                          child_count)
        #
        # Add an outline if asked to do so
        #
        if wants_outlines:
            outline_image = cpi.Image(
                outline(target_labels) > 0,
                parent_image=target_objects.parent_image)
            workspace.image_set.add(outlines_name, outline_image)

    if self.show_window:
        src_objects = workspace.get_objects(src_name)
        # Fixed: renamed the comprehension variable so it cannot shadow
        # the measurements object "m" (list comprehensions leak their
        # loop variable under Python 2).
        image_names = \
            [image for image in
             [meas.measurement.get_image_name(workspace.pipeline)
              for meas in self.measurements]
             if image is not None and
             image in workspace.image_set.get_names()]
        if len(image_names) == 0:
            # Measurement isn't image-based
            if src_objects.has_parent_image:
                image = src_objects.parent_image.pixel_data
            else:
                image = None
        else:
            image = workspace.image_set.get_image(image_names[0]).pixel_data
        workspace.display_data.src_objects_segmented = \
            src_objects.segmented
        workspace.display_data.image_names = image_names
        workspace.display_data.image = image
        workspace.display_data.target_objects_segmented = \
            target_objects.segmented
def run(self, workspace):
    '''Run the module on an image set

    Masks the input objects with either a binary image or another object
    set, removes (or truncates) objects according to the overlap choice,
    optionally renumbers the survivors, and records parent/child,
    count and location measurements for the masked objects.
    '''
    object_name = self.object_name.value
    remaining_object_name = self.remaining_objects.value
    original_objects = workspace.object_set.get_objects(object_name)

    if self.mask_choice == MC_IMAGE:
        mask = workspace.image_set.get_image(self.masking_image.value,
                                             must_be_binary=True)
        mask = mask.pixel_data
    else:
        masking_objects = workspace.object_set.get_objects(
            self.masking_objects.value)
        mask = masking_objects.segmented > 0
    if self.wants_inverted_mask:
        mask = ~mask
    #
    # Load the labels
    #
    labels = original_objects.segmented.copy()
    nobjects = np.max(labels)
    #
    # Resize the mask to cover the objects
    #
    # Pixels outside the original mask extent (m1 False) are unmasked.
    mask, m1 = cpo.size_similarly(labels, mask)
    mask[~m1] = False
    #
    # Apply the mask according to the overlap choice.
    #
    if nobjects == 0:
        pass
    elif self.overlap_choice == P_MASK:
        # Truncate objects at the mask boundary.
        labels = labels * mask
    else:
        pixel_counts = fix(
            scind.sum(mask, labels,
                      np.arange(1, nobjects + 1, dtype=np.int32)))
        if self.overlap_choice == P_KEEP:
            keep = pixel_counts > 0
        else:
            total_pixels = fix(
                scind.sum(np.ones(labels.shape), labels,
                          np.arange(1, nobjects + 1, dtype=np.int32)))
            if self.overlap_choice == P_REMOVE:
                keep = pixel_counts == total_pixels
            elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                fraction = self.overlap_fraction.value
                keep = pixel_counts / total_pixels >= fraction
            else:
                # Fixed: format the choice into the message with "%"
                # instead of passing it as a second constructor argument
                # (which left the "%s" uninterpolated).
                raise NotImplementedError(
                    "Unknown overlap-handling choice: %s" %
                    self.overlap_choice.value)
        # Prepend background (label 0) so keep[labels] lines up.
        keep = np.hstack(([False], keep))
        labels[~keep[labels]] = 0
    #
    # Renumber the labels matrix if requested
    #
    if self.retain_or_renumber == R_RENUMBER:
        unique_labels = np.unique(labels[labels != 0])
        indexer = np.zeros(nobjects + 1, int)
        indexer[unique_labels] = np.arange(1, len(unique_labels) + 1)
        labels = indexer[labels]
        parent_objects = unique_labels
    else:
        parent_objects = np.arange(1, nobjects + 1)
    #
    # Add the objects
    #
    remaining_objects = cpo.Objects()
    remaining_objects.segmented = labels
    remaining_objects.unedited_segmented = original_objects.unedited_segmented
    workspace.object_set.add_objects(remaining_objects,
                                     remaining_object_name)
    #
    # Add measurements
    #
    m = workspace.measurements
    m.add_measurement(remaining_object_name,
                      cellprofiler.measurement.FF_PARENT % object_name,
                      parent_objects)
    if np.max(original_objects.segmented) == 0:
        child_count = np.array([], int)
    else:
        child_count = fix(
            scind.sum(labels, original_objects.segmented,
                      np.arange(1, nobjects + 1, dtype=np.int32)))
        child_count = (child_count > 0).astype(int)
    m.add_measurement(
        object_name,
        cellprofiler.measurement.FF_CHILDREN_COUNT % remaining_object_name,
        child_count)
    if self.retain_or_renumber == R_RETAIN:
        remaining_object_count = nobjects
    else:
        remaining_object_count = len(unique_labels)
    I.add_object_count_measurements(m, remaining_object_name,
                                    remaining_object_count)
    I.add_object_location_measurements(m, remaining_object_name, labels)
    #
    # Save the input, mask and output images for display
    #
    if self.show_window:
        workspace.display_data.original_labels = original_objects.segmented
        workspace.display_data.final_labels = labels
        workspace.display_data.mask = mask
def run(self, workspace):
    """Greedily merge touching objects whose pairwise score exceeds the
    threshold.

    For every neighboring object pair (k, l), a score is computed from
    (a) the maximum "vertex score" — how non-k/l the window around each
    boundary vertex looks — and (b) the fraction of the smaller object's
    perimeter that lies on the shared seam.  Pairs with
    score >= self.threshold are merged in place into label k, and k's
    neighbor list is re-scanned.  Results are stored as a new object set
    plus count/location and parent/child measurements.
    """
    objects = workspace.object_set.get_objects(self.objects_name.value)
    assert isinstance(objects, cpo.Objects)
    labels = objects.segmented
    # Window half-size and merge threshold come from the module settings.
    half_window_size = self.window_size.value
    threshold = self.threshold.value
    output_labels = objects.segmented.copy()
    max_objects = np.max(output_labels)
    indices = np.arange(max_objects+1)

    # Get object slices
    object_slices = morph.fixup_scipy_ndimage_result(
        scind.measurements.find_objects(output_labels))

    # Calculate perimeters
    perimeters = morph.calculate_perimeters(output_labels, indices)

    # Find the neighbors
    neighbors = np.zeros((max_objects+1, max_objects+1), bool)
    # neighbors[i,j] = True when object j is a neighbor of object i.
    lmax = scind.grey_dilation(output_labels,
                               footprint=np.ones((3,3), bool))
    # lower pixel values will be replaced by adjacent larger pixel values
    lbig = output_labels.copy()
    lbig[lbig == 0] = np.iinfo(output_labels.dtype).max
    # set the background to be large so that it is ignored next
    lmin = scind.grey_erosion(lbig, footprint=np.ones((3,3), bool))
    # larger pixel values will be replaced by adjacent smaller pixel values
    for i in range(1, max_objects+1):
        object_bounds = (object_slices[i-1][0], object_slices[i-1][1])
        object_map = output_labels[object_bounds] == i
        # The part of the slice that contains the object
        lower_neighbors = np.unique(lmin[object_bounds][object_map])
        higher_neighbors = np.unique(lmax[object_bounds][object_map])
        for neighbor_list in [lower_neighbors, higher_neighbors]:
            for j in range(0, len(neighbor_list)):
                neighbors[i, neighbor_list[j]] = True
        neighbors[i, i] = False

    # Generate window dimensions for each location (necessary for the edges)
    # dim?_window holds the effective window extent at each pixel; it is
    # smaller than 2*half_window_size+1 near the image border.
    dim1_window = np.ones(output_labels.shape, int) * half_window_size
    dim2_window = np.ones(output_labels.shape, int) * half_window_size
    for i in range(0, half_window_size):
        dim1_window[i:output_labels.shape[0]-i,
                    0:output_labels.shape[1]] += 1
        dim2_window[0:output_labels.shape[0],
                    i:output_labels.shape[1]-i] += 1

    # Loop over all objects
    for k in range(1, max_objects+1):
        if (perimeters[k] == 0) or not np.max(neighbors[k]):
            # Has been removed by a merge or has no neighbors
            continue
        k_bounds_array = object_slices[k-1]
        k_dim1_bounds = k_bounds_array[0].indices(output_labels.shape[0])
        k_dim2_bounds = k_bounds_array[1].indices(output_labels.shape[1])
        # Loop over all neighbors of object k
        l = 0
        while l < max_objects:
            l += 1
            if k == l or (perimeters[l] == 0) or not neighbors[k, l]:
                # Has been removed by a merge or is not a neighbor of object k
                continue
            l_bounds_array = object_slices[l-1]
            l_dim1_bounds = l_bounds_array[0].indices(output_labels.shape[0])
            l_dim2_bounds = l_bounds_array[1].indices(output_labels.shape[1])
            # Union of the two bounding boxes.
            kl_dim1_min_bound = min(k_dim1_bounds[0], l_dim1_bounds[0])
            kl_dim1_max_bound = max(k_dim1_bounds[1], l_dim1_bounds[1])
            kl_dim2_min_bound = min(k_dim2_bounds[0], l_dim2_bounds[0])
            kl_dim2_max_bound = max(k_dim2_bounds[1], l_dim2_bounds[1])
            kl_bounds = (slice(kl_dim1_min_bound, kl_dim1_max_bound),
                         slice(kl_dim2_min_bound, kl_dim2_max_bound))
            kl_shape = (kl_dim1_max_bound - kl_dim1_min_bound,
                        kl_dim2_max_bound - kl_dim2_min_bound)
            #
            # Find the vertices of object pair k, l
            #
            # A vertex is a pixel that is both adjacent to the other
            # object of the pair and on the border with a third label.
            vertices = np.zeros(kl_shape, bool)
            isAdjacent = np.zeros(output_labels.shape, bool)
            isBorder = np.zeros(output_labels.shape, bool)
            for i in range(-1, 2):
                # Clip the (i)-shifted slice ranges to the image along
                # dimension 1 for both objects.
                ki_min = 0
                ki_max = 0
                li_min = 0
                li_max = 0
                if k_dim1_bounds[0] + i < 0:
                    ki_min = -i
                else:
                    ki_min = k_dim1_bounds[0]
                if l_dim1_bounds[0] + i < 0:
                    li_min = -i
                else:
                    li_min = l_dim1_bounds[0]
                if k_dim1_bounds[1] + i > output_labels.shape[0]:
                    ki_max = output_labels.shape[0] - i
                else:
                    ki_max = k_dim1_bounds[1]
                if l_dim1_bounds[1] + i > output_labels.shape[0]:
                    li_max = output_labels.shape[0] - i
                else:
                    li_max = l_dim1_bounds[1]
                ki_slice = slice(ki_min, ki_max)
                li_slice = slice(li_min, li_max)
                ki_test_slice = slice(ki_min+i, ki_max+i)
                li_test_slice = slice(li_min+i, li_max+i)
                for j in range(-1, 2):
                    if i == j == 0:
                        continue
                    # Clip the (j)-shifted slice ranges along dimension 2.
                    # NOTE(review): these comparisons use
                    # output_labels.shape[0] rather than shape[1] for the
                    # second dimension — looks like a copy/paste slip on
                    # non-square images; confirm against upstream.
                    kj_min = 0
                    kj_max = 0
                    lj_min = 0
                    lj_max = 0
                    if k_dim2_bounds[0] + j < 0:
                        kj_min = -j
                    else:
                        kj_min = k_dim2_bounds[0]
                    if l_dim2_bounds[0] + j < 0:
                        lj_min = -j
                    else:
                        lj_min = l_dim2_bounds[0]
                    if k_dim2_bounds[1] + j > output_labels.shape[0]:
                        kj_max = output_labels.shape[0] - j
                    else:
                        kj_max = k_dim2_bounds[1]
                    if l_dim2_bounds[1] + j > output_labels.shape[0]:
                        lj_max = output_labels.shape[0] - j
                    else:
                        lj_max = l_dim2_bounds[1]
                    if (kj_min == kj_max or ki_min == ki_max) and \
                       (lj_min == lj_max or li_min == li_max):
                        continue
                    kj_slice = slice(kj_min, kj_max)
                    lj_slice = slice(lj_min, lj_max)
                    kj_test_slice = slice(kj_min+j, kj_max+j)
                    lj_test_slice = slice(lj_min+j, lj_max+j)
                    k_bounds = (ki_slice, kj_slice)
                    l_bounds = (li_slice, lj_slice)
                    k_test_bounds = (ki_test_slice, kj_test_slice)
                    l_test_bounds = (li_test_slice, lj_test_slice)
                    kl_mod_bounds = (slice(min(ki_min, li_min),
                                           max(ki_max, li_max)),
                                     slice(min(kj_min, lj_min),
                                           max(kj_max, lj_max)))
                    # k pixel whose (i,j)-shifted neighbor is l, or vice
                    # versa, marks adjacency.
                    isAdjacentSlice = np.zeros(output_labels.shape, bool)
                    isAdjacentSlice[k_bounds] = np.logical_and(
                        output_labels[k_bounds] == k,
                        output_labels[k_test_bounds] == l)
                    isAdjacentSlice[l_bounds] = np.logical_or(
                        isAdjacentSlice[l_bounds],
                        np.logical_and(output_labels[l_bounds] == l,
                                       output_labels[l_test_bounds] == k))
                    isAdjacent[kl_mod_bounds] = np.logical_or(
                        isAdjacent[kl_mod_bounds],
                        isAdjacentSlice[kl_mod_bounds])
                    # k or l pixel whose shifted neighbor is neither k
                    # nor l marks a border with something else.
                    isBorderSlice = np.zeros(output_labels.shape, bool)
                    isBorderSlice[k_bounds] = np.logical_and(
                        output_labels[k_bounds] == k,
                        np.logical_and(output_labels[k_test_bounds] != k,
                                       output_labels[k_test_bounds] != l))
                    isBorderSlice[l_bounds] = np.logical_or(
                        isBorderSlice[l_bounds],
                        np.logical_and(
                            output_labels[l_bounds] == l,
                            np.logical_and(
                                output_labels[l_test_bounds] != k,
                                output_labels[l_test_bounds] != l)))
                    isBorder[kl_mod_bounds] = np.logical_or(
                        isBorder[kl_mod_bounds],
                        isBorderSlice[kl_mod_bounds])
            vertices = np.logical_and(isAdjacent[kl_bounds],
                                      isBorder[kl_bounds])
            #
            # Calculate the maximum vertex score for the pair
            #
            '''+1 for every non-I/J labeled pixel in the window
            +1 more for every non-I/J labeled pixel adjacent to it
            Divided by the score that would result if the objects were
            flat, e.g. (vertex is starred)
            0 0 0 0 0 0 0
            0 0 0 0 0 0 0
            0 0 0 1 1 0 0
            0 1 1 1 *1* 2 2
            0 1 1 1 1 2 2 2
            1 1 1 1 2 2 2
            1 1 1 2 2 2 2
            If it were flat, it would be 7 * 3 = 21, so the divisor is
            7 * 6 = 42.  In the case that the window is rectangular
            (such as at the image edge), we split the difference.
            Generalized:
            Vertex Score = (Sum of NotIJ) / [A * B - 0.5 * (A + B)]
            '''
            sum_not_IJ = np.zeros(kl_shape, int)
            for i in range(0, kl_shape[0]):
                for j in range(0, kl_shape[1]):
                    if not vertices[i, j]:
                        continue
                    window_bounds = (
                        slice(i - half_window_size + kl_dim1_min_bound,
                              i + half_window_size + kl_dim1_min_bound),
                        slice(j - half_window_size + kl_dim2_min_bound,
                              j + half_window_size + kl_dim2_min_bound))
                    window_slice = output_labels[window_bounds]
                    sum_not_IJ[i, j] = count_nonzero(
                        np.logical_and(window_slice != k,
                                       window_slice != l))
                    # Should be np.count_nonzero, but it doesn't exist in
                    # the version that comes with CellProfiler 2.0 r11710
            vertex_scores = sum_not_IJ / (
                dim1_window[kl_bounds] * dim2_window[kl_bounds] -
                0.5 * (dim1_window[kl_bounds] + dim2_window[kl_bounds]))
            # Perimeter of the union of k and l within the joint box.
            merged = np.zeros(kl_shape, int)
            merged[output_labels[kl_bounds] == k] = 1
            merged[output_labels[kl_bounds] == l] = 1
            merged_perimeter = morph.calculate_perimeters(merged, [1])
            # The seam is counted once in each object's perimeter.
            seam_length = (perimeters[k] + perimeters[l] -
                           merged_perimeter) / 2
            min_external_perimeter = \
                min(perimeters[k], perimeters[l]) - seam_length
            adjusted_min_external_perimeter = \
                min_external_perimeter / np.pi
            seam_fraction = seam_length / (
                adjusted_min_external_perimeter + seam_length)
            max_vertex_score = np.max(vertex_scores)
            score = (max_vertex_score + seam_fraction) / 2
            #
            # Join object pair with score above the threshold
            #
            if score >= threshold:
                output_labels[output_labels == l] = k
                # Calculate new perimeters
                perimeters[k] = merged_perimeter
                perimeters[l] = 0
                # Reconfigure neighbors
                neighbors[k] = np.logical_or(neighbors[k], neighbors[l])
                for x in range(1, max_objects+1):
                    # Is there an array method to do this?
                    if neighbors[x, l]:
                        neighbors[x, k] = True
                # NOTE(review): this clears rows 0..max_objects-1 only;
                # row max_objects keeps its l-column entry — suspected
                # off-by-one (0:max_objects+1 would clear all rows).
                # Confirm before changing.
                neighbors[0:max_objects, l] = False
                # Recalculate bounds
                k_dim1_bounds = kl_bounds[0].indices(output_labels.shape[0])
                k_dim2_bounds = kl_bounds[1].indices(output_labels.shape[1])
                # Reset l to 0 so that we check all the neighbors against
                # this new object
                l = 0
            else:
                pass

    output_objects = cpo.Objects()
    output_objects.segmented = output_labels
    if objects.has_small_removed_segmented:
        output_objects.small_removed_segmented = \
            copy_labels(objects.small_removed_segmented, output_labels)
    if objects.has_unedited_segmented:
        output_objects.unedited_segmented = \
            copy_labels(objects.unedited_segmented, output_labels)
    output_objects.parent_image = objects.parent_image
    workspace.object_set.add_objects(output_objects,
                                     self.output_objects_name.value)

    measurements = workspace.measurements
    add_object_count_measurements(measurements,
                                  self.output_objects_name.value,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(measurements,
                                     self.output_objects_name.value,
                                     output_objects.segmented)

    #
    # Relate the output objects to the input ones and record
    # the relationship.
    #
    children_per_parent, parents_of_children = \
        objects.relate_children(output_objects)
    measurements.add_measurement(self.objects_name.value,
                                 FF_CHILDREN_COUNT %
                                 self.output_objects_name.value,
                                 children_per_parent)
    measurements.add_measurement(self.output_objects_name.value,
                                 FF_PARENT % self.objects_name.value,
                                 parents_of_children)
    if self.wants_outlines:
        outlines = cellprofiler.cpmath.outline.outline(output_labels)
        outline_image = cpi.Image(outlines.astype(bool))
        workspace.image_set.add(self.outlines_name.value, outline_image)

    if workspace.frame is not None:
        workspace.display_data.orig_labels = objects.segmented
        workspace.display_data.output_labels = output_objects.segmented
def run(self, workspace): import cellprofiler.modules.identify as I # # Get the input and output image names. You need to get the .value # because otherwise you'll get the setting object instead of # the string name. # input_image_name = self.input_image_name.value prediction_image_name = self.prediction_image_name.value input_objects_name = self.input_objects.value output_objects_name = self.output_objects.value # # Get the image set. The image set has all of the images in it. # The assert statement makes sure that it really is an image set, # but, more importantly, it lets my editor do context-sensitive # completion for the image set. # image_set = workspace.image_set assert isinstance(image_set, cpi.ImageSet) # # Get the input image object. We want a grayscale image here. # The image set will convert a color image to a grayscale one # and warn the user. # input_image = image_set.get_image(input_image_name, must_be_grayscale = True).pixel_data # # I do something a little odd here and elsewhere. I normalize the # brightness by ordering the image pixels by brightness. I notice that # the samples vary in intensity (why?) and EM is a scanning technology # (right?) so there should be uniform illumination across an image. 
# r = np.random.RandomState() r.seed(np.sum((input_image * 65535).astype(np.uint16))) npixels = np.prod(input_image.shape) shape = input_image.shape order = np.lexsort([r.uniform(size=npixels), input_image.flatten()]) input_image = np.zeros(npixels) input_image[order] = np.arange(npixels).astype(float) / npixels input_image.shape = shape prediction_image = image_set.get_image(prediction_image_name, must_be_grayscale=True).pixel_data object_set = workspace.object_set assert isinstance(object_set, cpo.ObjectSet) input_objects = object_set.get_objects(input_objects_name) assert isinstance(input_objects, cpo.Objects) input_labeling = input_objects.segmented # # Find the border pixels - 4 connected # # There will be some repeats in here - I'm being lazy and I'm not # removing them. The inaccuracies will be random. # touch = ((input_labeling[1:, :] != input_labeling[:-1, :]) & (input_labeling[1:, :] != 0) & (input_labeling[:-1, :] != 0)) touchpair = np.argwhere(touch) touchpair = np.column_stack([ # touchpair[:,0:2] are the left coords touchpair, # touchpair[:,2:4] are the right coords touchpair + np.array([1,0], int)[np.newaxis,:], # touchpair[:,4] is the identity of the left object input_labeling[touchpair[:,0], touchpair[:, 1]], # touchpair[:,5] is the identity of the right object input_labeling[touchpair[:,0]+1, touchpair[:, 1]]]) TP_I0 = 0 TP_J0 = 1 TP_I1 = 2 TP_J1 = 3 TP_L0 = 4 TP_L1 = 5 touch = ((input_labeling[:, 1:] != input_labeling[:, :-1]) & (input_labeling[:, 1:] != 0) & (input_labeling[:, :-1] != 0)) tp2 = np.argwhere(touch) touchpair = np.vstack([touchpair, np.column_stack([ tp2, tp2 + np.array([0, 1], int)[np.newaxis,:], input_labeling[tp2[:, 0], tp2[:, 1]], input_labeling[tp2[:, 0], tp2[:, 1]+1]])]) if np.any(touch): # # Broadcast the touchpair counts and scores into sparse arrays. 
# The sparse array convention is to sum duplicates # counts = coo_matrix( (np.ones(touchpair.shape[0], int), (touchpair[:,TP_L0], touchpair[:,TP_L1])), shape=[input_objects.count+1]*2).toarray() scores = coo_matrix( (prediction_image[touchpair[:, TP_I0], touchpair[:, TP_J0]] + prediction_image[touchpair[:, TP_I1], touchpair[:, TP_J1]], (touchpair[:,TP_L0], touchpair[:,TP_L1])), shape=[input_objects.count+1]*2).toarray() / 2.0 scores = scores / counts to_remove = ((counts > self.min_border.value) & (scores > 1 - self.min_support.value)) else: to_remove = np.zeros((0,2), bool) # # For all_connected_components, do forward and backward links and # self-to-self links # remove_pairs = np.vstack([ np.argwhere(to_remove), np.argwhere(to_remove.transpose()), np.column_stack([np.arange(np.max(input_labeling)+1)] * 2)]) # # Find small objects and dark objects # areas = np.bincount(input_labeling.flatten()) brightness = np.bincount(input_labeling.flatten(), input_image.flatten()) brightness = brightness / areas # to_remove = ((areas < self.min_area.value) | (brightness < self.darkness.value)) to_remove[0] = False # # Find the biggest neighbor to all. If no neighbors, label = 0 # largest = np.zeros(areas.shape[0], np.uint32) if np.any(touch): largest[:counts.shape[1]] = \ np.argmax(np.maximum(counts, counts.transpose()), 0) remove_pairs = np.vstack([ remove_pairs, np.column_stack([np.arange(len(to_remove))[to_remove], largest[to_remove]])]) lnumbers = all_connected_components(remove_pairs[:, 0], remove_pairs[:, 1]).astype(np.uint32) # # Renumber. # output_labeling = lnumbers[input_labeling] # # Fill holes. # output_labeling = fill_labeled_holes(output_labeling) # # Remove the border pixels. This is for the challenge which requires # a mask. 
# can_remove = lnumbers[touchpair[:, TP_L0]] != lnumbers[touchpair[:, TP_L1]] output_labeling[touchpair[can_remove, TP_I0], touchpair[can_remove, TP_J0]] = 0 output_labeling[touchpair[can_remove, TP_I1], touchpair[can_remove, TP_J1]] = 0 output_objects = cpo.Objects() output_objects.segmented = output_labeling object_set.add_objects(output_objects, output_objects_name) nobjects = np.max(output_labeling) I.add_object_count_measurements(workspace.measurements, output_objects_name, nobjects) I.add_object_location_measurements(workspace.measurements, output_objects_name, output_labeling, nobjects) # Make an outline image outline_image = cellprofiler.cpmath.outline.outline(output_labeling).astype(bool) out_img = cpi.Image(outline_image, parent_image = image_set.get_image(input_image_name)) workspace.image_set.add(self.output_outlines_name.value, out_img) workspace.display_data.input_pixels = input_image workspace.display_data.input_labels = input_labeling workspace.display_data.output_labels = output_labeling workspace.display_data.outlines = outline_image
def run(self, workspace):
    '''Relabel the input objects and save the result to the workspace.

    Depending on the settings, either splits each input object into its
    connected components, or merges objects — by proximity (optionally
    filtered with an image) or by shared parent (optionally filling the
    convex hull).  The new objects, their count/location measurements and
    the parent/child relationship are recorded in the workspace.
    '''
    input_name = self.objects_name.value
    input_objects = workspace.object_set.get_objects(input_name)
    assert isinstance(input_objects, cpo.Objects)
    input_labels = input_objects.segmented
    eight_connected = np.ones((3, 3), bool)
    if self.relabel_option == OPTION_SPLIT:
        # Each connected component of the foreground becomes an object.
        output_labels = scind.label(input_labels > 0, eight_connected)[0]
    elif self.merge_option == UNIFY_DISTANCE:
        mask = input_labels > 0
        if self.distance_threshold.value > 0:
            #
            # Take the distance transform of the reverse of the mask
            # and figure out what points are less than 1/2 of the
            # distance from an object.
            #
            background_distance = scind.distance_transform_edt(~mask)
            mask = background_distance < self.distance_threshold.value / 2 + 1
        output_labels = scind.label(mask, eight_connected)[0]
        # Keep only the original foreground pixels.
        output_labels[input_labels == 0] = 0
        if self.wants_image:
            output_labels = self.filter_using_image(workspace, mask)
    elif self.merge_option == UNIFY_PARENT:
        parents_name = self.parent_object.value
        parent_feature = "_".join((C_PARENT, parents_name))
        parents_of = workspace.measurements[input_name, parent_feature]
        # Replace each object's label by the label of its parent.
        output_labels = input_labels.copy().astype(np.uint32)
        output_labels[input_labels > 0] = \
            parents_of[input_labels[input_labels > 0] - 1]
        if self.merging_method == UM_CONVEX_HULL:
            # Expand each merged object out to its convex hull.
            ch_pts, n_pts = morph.convex_hull(output_labels)
            ijv = morph.fill_convex_hulls(ch_pts, n_pts)
            output_labels[ijv[:, 0], ijv[:, 1]] = ijv[:, 2]
    output_objects = cpo.Objects()
    output_objects.segmented = output_labels
    # Carry the auxiliary labelings forward when the input has them.
    if input_objects.has_small_removed_segmented:
        output_objects.small_removed_segmented = copy_labels(
            input_objects.small_removed_segmented, output_labels)
    if input_objects.has_unedited_segmented:
        output_objects.unedited_segmented = copy_labels(
            input_objects.unedited_segmented, output_labels)
    output_objects.parent_image = input_objects.parent_image
    output_name = self.output_objects_name.value
    workspace.object_set.add_objects(output_objects, output_name)
    m = workspace.measurements
    add_object_count_measurements(m, output_name,
                                  np.max(output_objects.segmented))
    add_object_location_measurements(m, output_name,
                                     output_objects.segmented)
    #
    # Relate the output objects to the input ones and record
    # the relationship.
    #
    children_per_parent, parents_of_children = \
        input_objects.relate_children(output_objects)
    m.add_measurement(self.objects_name.value,
                      FF_CHILDREN_COUNT % output_name,
                      children_per_parent)
    m.add_measurement(output_name,
                      FF_PARENT % self.objects_name.value,
                      parents_of_children)
    if self.show_window:
        workspace.display_data.orig_labels = input_objects.segmented
        workspace.display_data.output_labels = output_objects.segmented
        if self.merge_option == UNIFY_PARENT:
            parent_objects = workspace.object_set.get_objects(
                self.parent_object.value)
            workspace.display_data.parent_labels = parent_objects.segmented
def run(self, workspace):
    """Detect spots with an a-trous wavelet transform and record them.

    workspace - contains
        image_set - the images in the image set being processed
        object_set - the objects (labeled masks) in this image set
        measurements - the measurements for this run
    """
    image_name = self.image_name.value
    cpimage = workspace.image_set.get_image(image_name)
    image = cpimage.pixel_data
    mask = cpimage.mask
    workspace.display_data.statistics = []
    level = int(self.atrous_level.value)
    # Enhance spots: take the wavelet detail plane at the chosen level,
    # zero everything below the noise threshold, then smooth.
    # NOTE: assumes level >= 1; level == 0 would silently index plane -1.
    wavelet = self.a_trous(1.0 * image, level + 1)
    wlevprod = wavelet[:, :, level - 1] * 3.0
    spotthresh = wlevprod.mean() + float(
        self.noise_removal_factor.value) * wlevprod.std()
    wlevprod[wlevprod < spotthresh] = 0
    wlevprod = self.circular_average_filter(
        wlevprod, int(self.smoothing_filter_size.value))
    wlevprod = self.smooth_image(wlevprod, mask)
    # Spots are local maxima (3x3 neighborhood) of the smoothed product
    # that also exceed the final spot threshold.
    max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod, 3)
    maxloc = (wlevprod == max_wlevprod)
    twlevprod = max_wlevprod > float(self.final_spot_threshold.value)
    maxloc[twlevprod == 0] = 0
    labeled_image, object_count = scipy.ndimage.label(
        maxloc, np.ones((3, 3), bool))
    unedited_labels = labeled_image.copy()
    # Filter out objects touching the border or mask
    border_excluded_labeled_image = labeled_image.copy()
    labeled_image = self.filter_on_border(image, labeled_image)
    border_excluded_labeled_image[labeled_image > 0] = 0
    # Relabel the image
    labeled_image, object_count = relabel(labeled_image)
    new_labeled_image, new_object_count = self.limit_object_count(
        labeled_image, object_count)
    if new_object_count < object_count:
        # Add the labels that were filtered out into the border
        # image.
        border_excluded_mask = ((border_excluded_labeled_image > 0) |
                                ((labeled_image > 0) &
                                 (new_labeled_image == 0)))
        border_excluded_labeled_image = scipy.ndimage.label(
            border_excluded_mask, np.ones((3, 3), bool))[0]
        object_count = new_object_count
        labeled_image = new_labeled_image
    # Make an outline image
    outline_image = cellprofiler.cpmath.outline.outline(labeled_image)
    if self.show_window:
        statistics = workspace.display_data.statistics
        statistics.append(["# of accepted objects", "%d" % (object_count)])
        workspace.display_data.image = image
        workspace.display_data.labeled_image = labeled_image
        workspace.display_data.border_excluded_labels = \
            border_excluded_labeled_image
    # Add image measurements
    objname = self.object_name.value
    measurements = workspace.measurements
    cpmi.add_object_count_measurements(measurements, objname, object_count)
    # Add label matrices to the object set
    objects = cellprofiler.objects.Objects()
    objects.segmented = labeled_image
    objects.unedited_segmented = unedited_labels
    # Fix: parent_image must be the Image object, not its raw pixel
    # array — matches the convention used by the other modules.
    objects.parent_image = cpimage
    workspace.object_set.add_objects(objects, self.object_name.value)
    cpmi.add_object_location_measurements(workspace.measurements,
                                          self.object_name.value,
                                          labeled_image)
    if self.should_save_outlines.value:
        out_img = cpi.Image(outline_image.astype(bool),
                            parent_image=cpimage)
        workspace.image_set.add(self.save_outlines.value, out_img)