def filter_using_image(self, workspace, mask):
     '''Filter out connections using local intensity minima between objects
     
     workspace - the workspace for the image set
     mask - mask of background points within the minimum distance
     '''
     #
     # NOTE: This is an efficient implementation and an improvement
     #       in accuracy over the Matlab version. It would be faster and
     #       more accurate to eliminate the line-connecting and instead
     #       do the following:
     #     * Distance transform to get the coordinates of the closest
     #       point in an object for points in the background that are
     #       at most 1/2 of the max distance between objects.
     #     * Take the intensity at this closest point and similarly
     #       label the background point if the background intensity
     #       is at least the minimum intensity fraction of that value.
     #     * Assume there is a connection between objects if, after this
     #       labeling, there are adjacent points in each object.
     #
     # As it is, the algorithm duplicates the Matlab version, but it
     # suffers for cells whose intensity isn't high at the centroid and
     # clearly suffers when two cells touch at a point that's off the
     # line between the two centroids. (A standalone sketch of the
     # distance-transform machinery appears after this function.)
     #
     objects = workspace.object_set.get_objects(self.objects_name.value)
     labels = objects.segmented
     image = self.get_image(workspace)
     if self.show_window:
         # Save the image for display
         workspace.display_data.image = image
     #
     # Do a distance transform into the background to label points
     # in the background with their closest foreground object
     #
     i, j = scind.distance_transform_edt(labels==0, 
                                         return_indices=True,
                                         return_distances=False)
     confluent_labels = labels[i,j]
     confluent_labels[~mask] = 0
     if self.where_algorithm == CA_CLOSEST_POINT:
         #
         # For the closest point method, find the intensity at
         # the closest point in the object (which will be the point itself
         # for points in the object).
         # 
         object_intensity = image[i,j] * self.minimum_intensity_fraction.value
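         # Sever the connection through any background point that is
         # dimmer than the required fraction of its nearest object's
         # intensity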
         confluent_labels[object_intensity > image] = 0
     count, index, c_j = morph.find_neighbors(confluent_labels)
     if len(c_j) == 0:
         # Nobody touches - return the labels matrix
         return labels
     #
     # Make a row of i matching the touching j
     #
     c_i = np.zeros(len(c_j))
     #
     # Eliminate labels without matches
     #
     label_numbers = np.arange(1,len(count)+1)[count > 0]
     index = index[count > 0]
     count = count[count > 0]
     #
     # Take the differences between successive label numbers so that a
     # cumsum steps to the next label exactly where the runs change
     # (distilled in the second sketch after this function)
     #
     label_numbers[1:] = label_numbers[1:] - label_numbers[:-1]
     c_i[index] = label_numbers
     c_i = np.cumsum(c_i).astype(int)
     if self.where_algorithm == CA_CENTROIDS:
         #
         # Only connect points > minimum intensity fraction
         #
         center_i, center_j = morph.centers_of_labels(labels)
         indexes, counts, i, j = morph.get_line_pts(
             center_i[c_i-1], center_j[c_i-1],
             center_i[c_j-1], center_j[c_j-1])
         #
         # The indexes of the centroids at pt1
         #
         last_indexes = indexes+counts-1
         #
         # The minimum of the intensities at pt0 and pt1
         #
         centroid_intensities = np.minimum(
             image[i[indexes],j[indexes]],
             image[i[last_indexes], j[last_indexes]])
         #
         # Assign label numbers to each point so we can use
         # scipy.ndimage.minimum. The label numbers are indexes into
         # the (c_i, c_j) connection pairs above.
         #
         pt_labels = np.zeros(len(i), int)
         pt_labels[indexes[1:]] = 1
         pt_labels = np.cumsum(pt_labels)
         minima = scind.minimum(image[i,j], pt_labels, np.arange(len(indexes)))
         minima = morph.fixup_scipy_ndimage_result(minima)
         #
         # Filter the connections using the image
         #
         mif = self.minimum_intensity_fraction.value
         i = c_i[centroid_intensities * mif <= minima]
         j = c_j[centroid_intensities * mif <= minima]
     else:
         i = c_i
         j = c_j
     #
     # Add in connections from self to self
     #
     unique_labels = np.unique(labels)
     i = np.hstack((i, unique_labels))
     j = np.hstack((j, unique_labels))
     #
     # Run "all_connected_components" to get a component # for
     # objects identified as same.
     #
     new_indexes = morph.all_connected_components(i, j)
     new_labels = np.zeros(labels.shape, int)
     new_labels[labels != 0] = new_indexes[labels[labels != 0]]
     return new_labels
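
# A minimal standalone sketch of the closest-point labeling used above
# (and central to the NOTE's proposed rewrite). Only scipy is assumed;
# the label matrix is a toy example, not CellProfiler data.
import numpy as np
from scipy import ndimage as scind

toy_labels = np.array([[1, 0, 0, 2],
                       [1, 0, 0, 2],
                       [0, 0, 0, 0]])
# Index arrays pointing at the nearest foreground pixel for every position
ti, tj = scind.distance_transform_edt(toy_labels == 0,
                                      return_indices=True,
                                      return_distances=False)
confluent = toy_labels[ti, tj]
# confluent propagates each object's label halfway across the background:
# [[1 1 2 2]
#  [1 1 2 2]
#  [1 1 2 2]]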
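
# The cumsum trick above, distilled: expand per-label neighbor counts
# into a row vector c_i that repeats each label once per neighbor. The
# input arrays are hypothetical stand-ins for morph.find_neighbors output.
import numpy as np

count = np.array([2, 0, 1])  # label 1 has 2 neighbors, label 3 has 1
index = np.array([0, 2, 2])  # where each label's run starts in c_j
c_j = np.array([2, 3, 1])    # all neighbor labels, concatenated

c_i = np.zeros(len(c_j))
label_numbers = np.arange(1, len(count) + 1)[count > 0]     # [1, 3]
index = index[count > 0]                                    # [0, 2]
label_numbers[1:] = label_numbers[1:] - label_numbers[:-1]  # [1, 2]
c_i[index] = label_numbers        # run starts marked: [1, 0, 2]
c_i = np.cumsum(c_i).astype(int)  # [1, 1, 3] pairs with c_j = [2, 3, 1]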
Example #2
    def run(self, workspace):
        '''Run the algorithm on one image set'''
        #
        # Get the image as a binary image
        #
        image_set = workspace.image_set
        image = image_set.get_image(self.image_name.value, must_be_binary=True)
        mask = image.pixel_data
        if image.has_mask:
            mask = mask & image.mask
        angle_count = self.angle_count.value
        #
        # We collect the i, j and angle of each point that survives an
        # erosion; pairs of these points are linked below when they are
        # close in this 3-d (i, j, angle) space.
        #
        # i - the i coordinate of each point found after erosion
        # j - the j coordinate of each point found after erosion
        # a - the angle of the structuring element for each point found
        #
        i = np.zeros(0, int)
        j = np.zeros(0, int)
        a = np.zeros(0, int)

        # Coordinate grids used to read out the i, j of each pixel that
        # survives an erosion
        ig, jg = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
        for angle_number in range(angle_count):
            angle = float(angle_number) * np.pi / float(angle_count)
            strel = self.get_diamond(angle)
            erosion = binary_erosion(mask, strel)
            #
            # Accumulate the count, i, j and angle for all foreground points
            # in the erosion
            #
            this_count = np.sum(erosion)
            i = np.hstack((i, ig[erosion]))
            j = np.hstack((j, jg[erosion]))
            a = np.hstack((a, np.ones(this_count, float) * angle))
        #
        # Find connections based on distances, not adjacency
        #
        first, second = self.find_adjacent_by_distance(i, j, a)
        #
        # Do all connected components.
        #
        if len(first) > 0:
            ij_labels = all_connected_components(first, second) + 1
            nlabels = np.max(ij_labels)
            label_indexes = np.arange(1, nlabels + 1)
            #
            # Compute the measurements
            #
            center_x = fix(mean_of_labels(j, ij_labels, label_indexes))
            center_y = fix(mean_of_labels(i, ij_labels, label_indexes))
            #
            # The angles are weirdly complicated because of the wrap-around.
            # You can imagine some horrible cases, like a circular patch of
            # "worm" in which all angles are represented or a gentle "U"
            # curve.
            #
            # For now, I'm going to use the following heuristic:
            #
            # Compute two different "angles". The angles of one go
            # from 0 to 180 and the angles of the other go from -90 to 90.
            # Take the variance of these from the mean and
            # choose the representation with the lowest variance.
            #
            # An alternative would be to compute the variance at each
            # possible dividing point; another would be to trace through
            # the connected components. Both are overkill, I hope, for such
            # an inconsequential measurement. (The two-representation
            # heuristic is distilled in the sketch after this function.)
            #
            angles = fix(mean_of_labels(a, ij_labels, label_indexes))
            vangles = fix(
                mean_of_labels((a - angles[ij_labels - 1])**2, ij_labels,
                               label_indexes))
            aa = a.copy()
            aa[a > np.pi / 2] -= np.pi
            aangles = fix(mean_of_labels(aa, ij_labels, label_indexes))
            vaangles = fix(
                mean_of_labels((aa - aangles[ij_labels - 1])**2, ij_labels,
                               label_indexes))
            aangles[aangles < 0] += np.pi
            angles[vaangles < vangles] = aangles[vaangles < vangles]
            #
            # Squish the labels to 2-d. The labels for overlaps are arbitrary.
            #
            labels = np.zeros(mask.shape, int)
            labels[i, j] = ij_labels
        else:
            center_x = np.zeros(0, int)
            center_y = np.zeros(0, int)
            angles = np.zeros(0)
            nlabels = 0
            label_indexes = np.zeros(0, int)
            labels = np.zeros(mask.shape, int)

        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        object_name = self.object_name.value
        m.add_measurement(object_name, I.M_LOCATION_CENTER_X, center_x)
        m.add_measurement(object_name, I.M_LOCATION_CENTER_Y, center_y)
        m.add_measurement(object_name, M_ANGLE, angles * 180 / np.pi)
        m.add_measurement(object_name, I.M_NUMBER_OBJECT_NUMBER, label_indexes)
        m.add_image_measurement(I.FF_COUNT % object_name, nlabels)
        #
        # Make the objects
        #
        object_set = workspace.object_set
        assert isinstance(object_set, cpo.ObjectSet)
        objects = cpo.Objects()
        objects.segmented = labels
        objects.parent_image = image
        object_set.add_objects(objects, object_name)
        if self.show_window:
            workspace.display_data.i = center_y
            workspace.display_data.j = center_x
            workspace.display_data.angle = angles
            workspace.display_data.mask = mask
            workspace.display_data.labels = labels
            workspace.display_data.count = nlabels
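
# The two-representation angle heuristic above, distilled into a
# standalone sketch. mean_undirected_angle is a hypothetical helper,
# not part of the module.
import numpy as np

def mean_undirected_angle(a):
    # Representation 1: angles as given, in [0, pi)
    m0 = a.mean()
    v0 = ((a - m0) ** 2).mean()
    # Representation 2: shift angles past pi/2 down by pi -> (-pi/2, pi/2]
    aa = a.copy()
    aa[a > np.pi / 2] -= np.pi
    m1 = aa.mean()
    v1 = ((aa - m1) ** 2).mean()
    # Keep whichever representation has the lower variance
    if v1 < v0:
        return m1 + np.pi if m1 < 0 else m1
    return m0

# Angles straddling the 0/pi wrap-around: the naive mean lands near
# pi/2, the heuristic lands near 0
a = np.array([0.05, 0.1, np.pi - 0.05, np.pi - 0.1])
print(a.mean())                  # ~1.571 (wrong)
print(mean_undirected_angle(a))  # ~0.0   (right)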
Example #3
 def run(self, workspace):
     import cellprofiler.modules.identify as I
     #
     # Get the input and output image names. You need to get the .value
     # because otherwise you'll get the setting object instead of
     # the string name.
     #
     input_image_name = self.input_image_name.value
     prediction_image_name = self.prediction_image_name.value
     input_objects_name = self.input_objects.value
     output_objects_name = self.output_objects.value
     #
     # Get the image set. The image set has all of the images in it.
     # The assert statement makes sure that it really is an image set,
     # but, more importantly, it lets my editor do context-sensitive
     # completion for the image set.
     #
     image_set = workspace.image_set
     assert isinstance(image_set, cpi.ImageSet)
     #
     # Get the input image object. We want a grayscale image here.
     # The image set will convert a color image to a grayscale one
     # and warn the user.
     #
     input_image = image_set.get_image(input_image_name,
                                       must_be_grayscale = True).pixel_data
     #
      # I do something a little odd here and elsewhere: I normalize the
      # brightness by ranking the image pixels by intensity. The samples
      # vary in overall intensity (why?) and EM is a scanning technology
      # (right?), so illumination should be uniform across an image and
      # the rank transform puts every image on a common footing. (A
      # distilled version appears after this function.)
     #
     r = np.random.RandomState()
     r.seed(np.sum((input_image * 65535).astype(np.uint16)))
     npixels = np.prod(input_image.shape)
     shape = input_image.shape
     order = np.lexsort([r.uniform(size=npixels),
                         input_image.flatten()])
     input_image = np.zeros(npixels)
     input_image[order] = np.arange(npixels).astype(float) / npixels
     input_image.shape = shape
     
     prediction_image = image_set.get_image(prediction_image_name,
                                            must_be_grayscale=True).pixel_data
     object_set = workspace.object_set
     assert isinstance(object_set, cpo.ObjectSet)
     input_objects = object_set.get_objects(input_objects_name)
     assert isinstance(input_objects, cpo.Objects)
     input_labeling = input_objects.segmented
     #
     # Find the border pixels - 4 connected
     #
     # There will be some repeats in here - I'm being lazy and I'm not
     # removing them. The inaccuracies will be random.
     #
     touch = ((input_labeling[1:, :] != input_labeling[:-1, :]) &
              (input_labeling[1:, :] != 0) &
              (input_labeling[:-1, :] != 0))
     touchpair = np.argwhere(touch)
     touchpair = np.column_stack([
         # touchpair[:,0:2] are the left coords
         touchpair, 
         # touchpair[:,2:4] are the right coords
         touchpair + np.array([1,0], int)[np.newaxis,:],
         # touchpair[:,4] is the identity of the left object
         input_labeling[touchpair[:,0], touchpair[:, 1]],
         # touchpair[:,5] is the identity of the right object
         input_labeling[touchpair[:,0]+1, touchpair[:, 1]]])
     TP_I0 = 0
     TP_J0 = 1
     TP_I1 = 2
     TP_J1 = 3
     TP_L0 = 4
     TP_L1 = 5
         
     touch = ((input_labeling[:, 1:] != input_labeling[:, :-1]) &
               (input_labeling[:, 1:] != 0) &
               (input_labeling[:, :-1] != 0))
     tp2 = np.argwhere(touch)
     
     touchpair = np.vstack([touchpair, np.column_stack([
         tp2, 
         tp2 + np.array([0, 1], int)[np.newaxis,:],
         input_labeling[tp2[:, 0], tp2[:, 1]],
         input_labeling[tp2[:, 0], tp2[:, 1]+1]])])
     if np.any(touch):
         #
          # Broadcast the touchpair counts and scores into sparse arrays.
          # The COO constructor sums duplicate (row, col) entries, which
          # accumulates the per-pair totals in one step.
         #
         counts = coo_matrix(
             (np.ones(touchpair.shape[0], int),
              (touchpair[:,TP_L0], touchpair[:,TP_L1])),
             shape=[input_objects.count+1]*2).toarray()
         scores = coo_matrix(
             (prediction_image[touchpair[:, TP_I0],
                               touchpair[:, TP_J0]] +
              prediction_image[touchpair[:, TP_I1],
                               touchpair[:, TP_J1]],
              (touchpair[:,TP_L0], touchpair[:,TP_L1])),
             shape=[input_objects.count+1]*2).toarray() / 2.0
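          # Pairs that never touch divide 0 / 0 to NaN here; NaN fails
          # the threshold tests below, so such pairs are never removed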
         scores = scores / counts
         to_remove = ((counts > self.min_border.value) &
                      (scores > 1 - self.min_support.value))
     else:
         to_remove = np.zeros((0,2), bool)
     #
     # For all_connected_components, do forward and backward links and
     # self-to-self links
     #
     remove_pairs = np.vstack([
         np.argwhere(to_remove), 
         np.argwhere(to_remove.transpose()),
         np.column_stack([np.arange(np.max(input_labeling)+1)] * 2)])
     #
     # Find small objects and dark objects
     #
     areas = np.bincount(input_labeling.flatten())
     brightness = np.bincount(input_labeling.flatten(), input_image.flatten())
     brightness = brightness / areas
     #
     to_remove = ((areas < self.min_area.value) | 
                  (brightness < self.darkness.value))
     to_remove[0] = False
     #
      # Find each object's biggest neighbor. If it has no neighbors, label = 0
     #
     largest = np.zeros(areas.shape[0], np.uint32)
     if np.any(touch):
         largest[:counts.shape[1]] = \
             np.argmax(np.maximum(counts, counts.transpose()), 0)
     remove_pairs = np.vstack([
         remove_pairs,
         np.column_stack([np.arange(len(to_remove))[to_remove],
                          largest[to_remove]])])
     lnumbers = all_connected_components(remove_pairs[:, 0], 
                                         remove_pairs[:, 1]).astype(np.uint32)
     #
     # Renumber.
     #
     output_labeling = lnumbers[input_labeling]
     #
     # Fill holes.
     #
     output_labeling = fill_labeled_holes(output_labeling)
     #
     # Remove the border pixels. This is for the challenge which requires
     # a mask. 
     #
     can_remove = lnumbers[touchpair[:, TP_L0]] != lnumbers[touchpair[:, TP_L1]]
     output_labeling[touchpair[can_remove, TP_I0], 
                     touchpair[can_remove, TP_J0]] = 0
     output_labeling[touchpair[can_remove, TP_I1], 
                     touchpair[can_remove, TP_J1]] = 0
     output_objects = cpo.Objects()
     output_objects.segmented = output_labeling
     object_set.add_objects(output_objects, output_objects_name)
     nobjects = np.max(output_labeling)
     I.add_object_count_measurements(workspace.measurements,
                                     output_objects_name,
                                     nobjects)
     I.add_object_location_measurements(workspace.measurements,
                                        output_objects_name, 
                                        output_labeling, nobjects)
     # Make an outline image
     outline_image = cellprofiler.cpmath.outline.outline(output_labeling).astype(bool)
     out_img = cpi.Image(outline_image,
                         parent_image = image_set.get_image(input_image_name))
     workspace.image_set.add(self.output_outlines_name.value, out_img)
     
     workspace.display_data.input_pixels = input_image
     workspace.display_data.input_labels = input_labeling
     workspace.display_data.output_labels = output_labeling
     workspace.display_data.outlines = outline_image
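
# The rank normalization above, extracted into a hypothetical standalone
# helper: order the pixels by brightness (random tie-breaking, seeded from
# the data as in the code above) and replace each value with its
# rank / npixels, flattening the intensity histogram identically for
# every image. img is assumed to be a float image scaled to [0, 1].
import numpy as np

def rank_normalize(img):
    r = np.random.RandomState()
    r.seed(np.sum((img * 65535).astype(np.uint16)))
    npixels = img.size
    # lexsort: the last key (brightness) is primary, ties are broken
    # by the random key
    order = np.lexsort([r.uniform(size=npixels), img.ravel()])
    out = np.zeros(npixels)
    out[order] = np.arange(npixels, dtype=float) / npixels
    return out.reshape(img.shape)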