def score_zernike(zf, radii, labels, indexes=None):
    """Score the output of construct_zernike_polynomials

    zf - the output of construct_zernike_polynomials which is I x J x K
         where K is the number of zernike polynomials computed
    radii - a vector of the radius of each of N labeled objects
    labels - a label matrix

    outputs a N x K matrix of the scores of each of the Zernikes for
    each labeled object.
    """
    if indexes is None:
        indexes = np.arange(1, np.max(labels) + 1, dtype=np.int32)
    else:
        indexes = np.array(indexes, dtype=np.int32)
    radii = np.array(radii)
    k = zf.shape[2]
    n = np.product(radii.shape)
    score = np.zeros((n, k))
    if n == 0:
        return score
    areas = radii ** 2 * np.pi
    for ki in range(k):
        zfk = zf[:, :, ki]
        real_score = scipy.ndimage.sum(zfk.real, labels, indexes)
        real_score = fixup_scipy_ndimage_result(real_score)
        imag_score = scipy.ndimage.sum(zfk.imag, labels, indexes)
        imag_score = fixup_scipy_ndimage_result(imag_score)
        one_score = np.sqrt(real_score ** 2 + imag_score ** 2) / areas
        score[:, ki] = one_score
    return score
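# Illustrative sketch (not part of the module): the per-object score computed
# above is the magnitude of the complex sum of a Zernike polynomial image over
# the object's pixels, normalized by the object's nominal area (pi * r**2).
# The snippet below reproduces that arithmetic for a single synthetic object
# and a single polynomial; `labels_demo`, `radius` and `zfk` are hypothetical
# stand-ins for the arguments score_zernike receives.
import numpy as np
import scipy.ndimage

labels_demo = np.zeros((8, 8), np.int32)
labels_demo[2:6, 2:6] = 1                    # one square "object" labeled 1
radius = 2.0                                 # its nominal radius
zfk = np.random.rand(8, 8) + 1j * np.random.rand(8, 8)   # one fake Zernike image

real_sum = scipy.ndimage.sum(zfk.real, labels_demo, [1])
imag_sum = scipy.ndimage.sum(zfk.imag, labels_demo, [1])
score = np.sqrt(np.square(real_sum) + np.square(imag_sum)) / (np.pi * radius ** 2)
print(score)   # the value score_zernike would place in score[0, ki] for this object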
def filter_using_image(self, workspace, mask):
    '''Filter out connections using local intensity minima between objects

    workspace - the workspace for the image set
    mask - mask of background points within the minimum distance
    '''
    #
    # NOTE: This is an efficient implementation and an improvement
    #       in accuracy over the Matlab version. It would be faster and
    #       more accurate to eliminate the line-connecting and instead
    #       do the following:
    #     * Distance transform to get the coordinates of the closest
    #       point in an object for points in the background that are
    #       at most 1/2 of the max distance between objects.
    #     * Take the intensity at this closest point and similarly
    #       label the background point if the background intensity
    #       is at least the minimum intensity fraction
    #     * Assume there is a connection between objects if, after this
    #       labeling, there are adjacent points in each object.
    #
    # As it is, the algorithm duplicates the Matlab version but suffers
    # for cells whose intensity isn't high in the centroid and clearly
    # suffers when two cells touch at some point that's off of the line
    # between the two.
    #
    objects = workspace.object_set.get_objects(self.objects_name.value)
    labels = objects.segmented
    image = self.get_image(workspace)
    if self.show_window:
        # Save the image for display
        workspace.display_data.image = image
    #
    # Do a distance transform into the background to label points
    # in the background with their closest foreground object
    #
    i, j = scind.distance_transform_edt(labels == 0,
                                        return_indices=True,
                                        return_distances=False)
    confluent_labels = labels[i, j]
    confluent_labels[~mask] = 0
    if self.where_algorithm == CA_CLOSEST_POINT:
        #
        # For the closest point method, find the intensity at
        # the closest point in the object (which will be the point itself
        # for points in the object).
        #
        object_intensity = image[i, j] * self.minimum_intensity_fraction.value
        confluent_labels[object_intensity > image] = 0
    count, index, c_j = morph.find_neighbors(confluent_labels)
    if len(c_j) == 0:
        # Nobody touches - return the labels matrix
        return labels
    #
    # Make a row of i matching the touching j
    #
    c_i = np.zeros(len(c_j))
    #
    # Eliminate labels without matches
    #
    label_numbers = np.arange(1, len(count) + 1)[count > 0]
    index = index[count > 0]
    count = count[count > 0]
    #
    # Get the differences between labels so we can use a cumsum trick
    # to increment to the next label when they change
    #
    label_numbers[1:] = label_numbers[1:] - label_numbers[:-1]
    c_i[index] = label_numbers
    c_i = np.cumsum(c_i).astype(int)
    if self.where_algorithm == CA_CENTROIDS:
        #
        # Only connect points > minimum intensity fraction
        #
        center_i, center_j = morph.centers_of_labels(labels)
        indexes, counts, i, j = morph.get_line_pts(center_i[c_i - 1],
                                                   center_j[c_i - 1],
                                                   center_i[c_j - 1],
                                                   center_j[c_j - 1])
        #
        # The indexes of the centroids at pt1
        #
        last_indexes = indexes + counts - 1
        #
        # The minimum of the intensities at pt0 and pt1
        #
        centroid_intensities = np.minimum(
            image[i[indexes], j[indexes]],
            image[i[last_indexes], j[last_indexes]])
        #
        # Assign label numbers to each point so we can use
        # scipy.ndimage.minimum. The label numbers are indexes into
        # "connections" above.
        #
        pt_labels = np.zeros(len(i), int)
        pt_labels[indexes[1:]] = 1
        pt_labels = np.cumsum(pt_labels)
        minima = scind.minimum(image[i, j], pt_labels, np.arange(len(indexes)))
        minima = morph.fixup_scipy_ndimage_result(minima)
        #
        # Filter the connections using the image
        #
        mif = self.minimum_intensity_fraction.value
        i = c_i[centroid_intensities * mif <= minima]
        j = c_j[centroid_intensities * mif <= minima]
    else:
        i = c_i
        j = c_j
    #
    # Add in connections from self to self
    #
    unique_labels = np.unique(labels)
    i = np.hstack((i, unique_labels))
    j = np.hstack((j, unique_labels))
    #
    # Run "all_connected_components" to get a component # for
    # objects identified as same.
    #
    new_indexes = morph.all_connected_components(i, j)
    new_labels = np.zeros(labels.shape, int)
    new_labels[labels != 0] = new_indexes[labels[labels != 0]]
    return new_labels
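# Illustrative sketch (hypothetical data, not part of the module): the "cumsum
# trick" used above expands a per-label neighbor count into one row label per
# neighbor pair without a Python loop. Given count[k] neighbors for label k+1
# and index[k] marking where that label's neighbors start in c_j, writing the
# label differences at the start positions and taking a cumulative sum yields
# c_i, the label that owns each entry of c_j.
import numpy as np

count = np.array([2, 0, 3])          # label 1 has 2 neighbors, label 3 has 3
index = np.array([0, 2, 2])          # start offset of each label's neighbors
c_j = np.array([2, 3, 1, 2, 4])      # flattened neighbor labels

c_i = np.zeros(len(c_j))
label_numbers = np.arange(1, len(count) + 1)[count > 0]    # [1, 3]
index = index[count > 0]                                   # [0, 2]
label_numbers[1:] = label_numbers[1:] - label_numbers[:-1] # [1, 2]
c_i[index] = label_numbers
c_i = np.cumsum(c_i).astype(int)
print(c_i)   # [1 1 3 3 3] - the label owning each entry of c_j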
def run(self, workspace):
    """Run the algorithm on one image set"""
    #
    # Get the image as a binary image
    #
    image_set = workspace.image_set
    image = image_set.get_image(self.image_name.value, must_be_binary=True)
    mask = image.pixel_data
    if image.has_mask:
        mask = mask & image.mask
    angle_count = self.angle_count.value
    #
    # We collect the i,j and angle of pairs of points that
    # are 3-d adjacent after erosion.
    #
    # i - the i coordinate of each point found after erosion
    # j - the j coordinate of each point found after erosion
    # a - the angle of the structuring element for each point found
    #
    i = numpy.zeros(0, int)
    j = numpy.zeros(0, int)
    a = numpy.zeros(0, int)

    ig, jg = numpy.mgrid[0 : mask.shape[0], 0 : mask.shape[1]]
    this_idx = 0
    for angle_number in range(angle_count):
        angle = float(angle_number) * numpy.pi / float(angle_count)
        strel = self.get_diamond(angle)
        erosion = binary_erosion(mask, strel)
        #
        # Accumulate the count, i, j and angle for all foreground points
        # in the erosion
        #
        this_count = numpy.sum(erosion)
        i = numpy.hstack((i, ig[erosion]))
        j = numpy.hstack((j, jg[erosion]))
        a = numpy.hstack((a, numpy.ones(this_count, float) * angle))
    #
    # Find connections based on distances, not adjacency
    #
    first, second = self.find_adjacent_by_distance(i, j, a)
    #
    # Do all connected components.
    #
    if len(first) > 0:
        ij_labels = all_connected_components(first, second) + 1
        nlabels = numpy.max(ij_labels)
        label_indexes = numpy.arange(1, nlabels + 1)
        #
        # Compute the measurements
        #
        center_x = fixup_scipy_ndimage_result(
            mean_of_labels(j, ij_labels, label_indexes)
        )
        center_y = fixup_scipy_ndimage_result(
            mean_of_labels(i, ij_labels, label_indexes)
        )
        #
        # The angles are weirdly complicated because of the wrap-around.
        # You can imagine some horrible cases, like a circular patch of
        # "worm" in which all angles are represented or a gentle "U"
        # curve.
        #
        # For now, I'm going to use the following heuristic:
        #
        # Compute two different "angles". The angles of one go
        # from 0 to 180 and the angles of the other go from -90 to 90.
        # Take the variance of these from the mean and
        # choose the representation with the lowest variance.
        #
        # An alternative would be to compute the variance at each possible
        # dividing point. Another alternative would be to actually trace
        # through the connected components - both overkill for such an
        # inconsequential measurement, I hope.
        #
        angles = fixup_scipy_ndimage_result(
            mean_of_labels(a, ij_labels, label_indexes)
        )
        vangles = fixup_scipy_ndimage_result(
            mean_of_labels(
                (a - angles[ij_labels - 1]) ** 2, ij_labels, label_indexes
            )
        )
        aa = a.copy()
        aa[a > numpy.pi / 2] -= numpy.pi
        aangles = fixup_scipy_ndimage_result(
            mean_of_labels(aa, ij_labels, label_indexes)
        )
        vaangles = fixup_scipy_ndimage_result(
            mean_of_labels(
                (aa - aangles[ij_labels - 1]) ** 2, ij_labels, label_indexes
            )
        )
        aangles[aangles < 0] += numpy.pi
        angles[vaangles < vangles] = aangles[vaangles < vangles]
        #
        # Squish the labels to 2-d. The labels for overlaps are arbitrary.
        #
        labels = numpy.zeros(mask.shape, int)
        labels[i, j] = ij_labels
    else:
        center_x = numpy.zeros(0, int)
        center_y = numpy.zeros(0, int)
        angles = numpy.zeros(0)
        nlabels = 0
        label_indexes = numpy.zeros(0, int)
        labels = numpy.zeros(mask.shape, int)

    m = workspace.measurements
    assert isinstance(m, Measurements)
    object_name = self.object_name.value
    m.add_measurement(object_name, M_LOCATION_CENTER_X, center_x)
    m.add_measurement(object_name, M_LOCATION_CENTER_Y, center_y)
    m.add_measurement(object_name, M_ANGLE, angles * 180 / numpy.pi)
    m.add_measurement(object_name, M_NUMBER_OBJECT_NUMBER, label_indexes)
    m.add_image_measurement(FF_COUNT % object_name, nlabels)
    #
    # Make the objects
    #
    object_set = workspace.object_set
    assert isinstance(object_set, ObjectSet)
    objects = Objects()
    objects.segmented = labels
    objects.parent_image = image
    object_set.add_objects(objects, object_name)
    if self.show_window:
        workspace.display_data.i = center_y
        workspace.display_data.j = center_x
        workspace.display_data.angle = angles
        workspace.display_data.mask = mask
        workspace.display_data.labels = labels
        workspace.display_data.count = nlabels
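# Illustrative sketch (hypothetical angles, not part of the module): the
# wrap-around heuristic above compares two representations of the same angle
# set - one spanning [0, pi) and one remapped to [-pi/2, pi/2) - and keeps the
# mean from whichever has the smaller variance. For segments clustered near
# 0/pi the remapped representation wins, giving a mean near 0 instead of a
# misleading mean near pi/2.
import numpy

a = numpy.array([0.05, 0.10, numpy.pi - 0.05, numpy.pi - 0.10])  # nearly horizontal

mean_0_pi = a.mean()
var_0_pi = ((a - mean_0_pi) ** 2).mean()

aa = a.copy()
aa[a > numpy.pi / 2] -= numpy.pi                 # remap to [-pi/2, pi/2)
mean_halfpi = aa.mean()
var_halfpi = ((aa - mean_halfpi) ** 2).mean()
if mean_halfpi < 0:
    mean_halfpi += numpy.pi

best = mean_halfpi if var_halfpi < var_0_pi else mean_0_pi
print(numpy.degrees(best))   # ~0 degrees rather than the misleading ~90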
def run(self, workspace):
    """Run the module on an image set"""
    object_name = self.object_name.value
    remaining_object_name = self.remaining_objects.value
    original_objects = workspace.object_set.get_objects(object_name)

    if self.mask_choice == MC_IMAGE:
        mask = workspace.image_set.get_image(
            self.masking_image.value, must_be_binary=True
        )
        mask = mask.pixel_data
    else:
        masking_objects = workspace.object_set.get_objects(
            self.masking_objects.value
        )
        mask = masking_objects.segmented > 0
    if self.wants_inverted_mask:
        mask = ~mask
    #
    # Load the labels
    #
    labels = original_objects.segmented.copy()
    nobjects = numpy.max(labels)
    #
    # Resize the mask to cover the objects
    #
    mask, m1 = size_similarly(labels, mask)
    mask[~m1] = False
    #
    # Apply the mask according to the overlap choice.
    #
    if nobjects == 0:
        pass
    elif self.overlap_choice == P_MASK:
        labels = labels * mask
    else:
        pixel_counts = fixup_scipy_ndimage_result(
            scipy.ndimage.sum(
                mask, labels, numpy.arange(1, nobjects + 1, dtype=numpy.int32)
            )
        )
        if self.overlap_choice == P_KEEP:
            keep = pixel_counts > 0
        else:
            total_pixels = fixup_scipy_ndimage_result(
                scipy.ndimage.sum(
                    numpy.ones(labels.shape),
                    labels,
                    numpy.arange(1, nobjects + 1, dtype=numpy.int32),
                )
            )
            if self.overlap_choice == P_REMOVE:
                keep = pixel_counts == total_pixels
            elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                fraction = self.overlap_fraction.value
                keep = pixel_counts / total_pixels >= fraction
            else:
                raise NotImplementedError(
                    "Unknown overlap-handling choice: %s"
                    % self.overlap_choice.value
                )
        keep = numpy.hstack(([False], keep))
        labels[~keep[labels]] = 0
    #
    # Renumber the labels matrix if requested
    #
    if self.retain_or_renumber == R_RENUMBER:
        unique_labels = numpy.unique(labels[labels != 0])
        indexer = numpy.zeros(nobjects + 1, int)
        indexer[unique_labels] = numpy.arange(1, len(unique_labels) + 1)
        labels = indexer[labels]
        parent_objects = unique_labels
    else:
        parent_objects = numpy.arange(1, nobjects + 1)
    #
    # Add the objects
    #
    remaining_objects = Objects()
    remaining_objects.segmented = labels
    remaining_objects.unedited_segmented = original_objects.unedited_segmented
    workspace.object_set.add_objects(remaining_objects, remaining_object_name)
    #
    # Add measurements
    #
    m = workspace.measurements
    m.add_measurement(remaining_object_name, FF_PARENT % object_name, parent_objects)
    if numpy.max(original_objects.segmented) == 0:
        child_count = numpy.array([], int)
    else:
        child_count = fixup_scipy_ndimage_result(
            scipy.ndimage.sum(
                labels,
                original_objects.segmented,
                numpy.arange(1, nobjects + 1, dtype=numpy.int32),
            )
        )
        child_count = (child_count > 0).astype(int)
    m.add_measurement(
        object_name, FF_CHILDREN_COUNT % remaining_object_name, child_count
    )
    if self.retain_or_renumber == R_RETAIN:
        remaining_object_count = nobjects
    else:
        remaining_object_count = len(unique_labels)
    add_object_count_measurements(m, remaining_object_name, remaining_object_count)
    add_object_location_measurements(m, remaining_object_name, labels)
    #
    # Save the input, mask and output images for display
    #
    if self.show_window:
        workspace.display_data.original_labels = original_objects.segmented
        workspace.display_data.final_labels = labels
        workspace.display_data.mask = mask
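# Illustrative sketch (hypothetical labels, not part of the module): the
# renumbering step above packs the surviving label numbers down to 1..N with a
# lookup table ("indexer") instead of relabeling pixel by pixel.
import numpy

labels = numpy.array([[0, 2, 2],
                      [0, 0, 5],
                      [7, 7, 0]])       # labels 1, 3, 4 and 6 were removed by the mask
nobjects = 7

unique_labels = numpy.unique(labels[labels != 0])          # [2, 5, 7]
indexer = numpy.zeros(nobjects + 1, int)
indexer[unique_labels] = numpy.arange(1, len(unique_labels) + 1)
print(indexer[labels])
# [[0 1 1]
#  [0 0 2]
#  [3 3 0]]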