Example #1
    def filtered_labels(self, workspace, gridding):
        """Filter labels by proximity to edges of grid"""
        #
        # A label might slightly graze a grid other than its own or
        # a label might be something small in a corner of the grid.
        # This function filters out those parts of the guide labels matrix
        #
        assert isinstance(gridding, Grid)
        guide_labels = self.get_guide_labels(workspace)
        labels = self.fill_grid(workspace, gridding)

        centers = numpy.zeros((2, numpy.max(guide_labels) + 1))
        centers[:, 1:] = centers_of_labels(guide_labels)
        bad_centers = (
            (~numpy.isfinite(centers[0, :]))
            | (~numpy.isfinite(centers[1, :]))
            | (centers[0, :] >= labels.shape[0])
            | (centers[1, :] >= labels.shape[1])
        )
        centers = numpy.round(centers).astype(int)
        masked_labels = labels.copy()
        x_border = int(numpy.ceil(gridding.x_spacing / 10))
        y_border = int(numpy.ceil(gridding.y_spacing / 10))
        #
        # erase anything that's not like what's next to it
        #
        ymask = labels[y_border:, :] != labels[:-y_border, :]
        masked_labels[y_border:, :][ymask] = 0
        masked_labels[:-y_border, :][ymask] = 0
        xmask = labels[:, x_border:] != labels[:, :-x_border]
        masked_labels[:, x_border:][xmask] = 0
        masked_labels[:, :-x_border][xmask] = 0
        #
        # Find out the grid that each center falls into. If a center falls
        # into the border region, it will get a grid number of 0 and be
        # erased. The guide objects may fall below or to the right of the
        # grid or there may be gaps in numbering, so we set the center label
        # of bad centers to 0.
        #
        centers[:, bad_centers] = 0
        lcenters = masked_labels[centers[0, :], centers[1, :]]
        lcenters[bad_centers] = 0
        #
        # Use the guide labels to look up the corresponding center for
        # each guide object pixel. Mask out guide labels that don't match
        # centers.
        #
        mask = numpy.zeros(guide_labels.shape, bool)
        ii_labels = numpy.index_exp[0 : labels.shape[0], 0 : labels.shape[1]]
        mask[ii_labels] = lcenters[guide_labels[ii_labels]] != labels
        mask[guide_labels == 0] = True
        mask[lcenters[guide_labels] == 0] = True
        filtered_guide_labels = guide_labels.copy()
        filtered_guide_labels[mask] = 0
        return filtered_guide_labels
Example #2
    def filtered_labels(self, workspace, gridding):
        '''Filter labels by proximity to edges of grid'''
        #
        # A label might slightly graze a grid other than its own or
        # a label might be something small in a corner of the grid.
        # This function filters out those parts of the guide labels matrix
        #
        assert isinstance(gridding, cpg.Grid)
        guide_labels = self.get_guide_labels(workspace)
        labels = self.fill_grid(workspace, gridding)

        centers = np.zeros((2, np.max(guide_labels) + 1))
        centers[:, 1:] = centers_of_labels(guide_labels)
        bad_centers = ((~ np.isfinite(centers[0, :])) |
                       (~ np.isfinite(centers[1, :])) |
                       (centers[0, :] >= labels.shape[0]) |
                       (centers[1, :] >= labels.shape[1]))
        centers = np.round(centers).astype(int)
        masked_labels = labels.copy()
        x_border = int(np.ceil(gridding.x_spacing / 10))
        y_border = int(np.ceil(gridding.y_spacing / 10))
        #
        # erase anything that's not like what's next to it
        #
        ymask = labels[y_border:, :] != labels[:-y_border, :]
        masked_labels[y_border:, :][ymask] = 0
        masked_labels[:-y_border, :][ymask] = 0
        xmask = labels[:, x_border:] != labels[:, :-x_border]
        masked_labels[:, x_border:][xmask] = 0
        masked_labels[:, :-x_border][xmask] = 0
        #
        # Find out the grid that each center falls into. If a center falls
        # into the border region, it will get a grid number of 0 and be
        # erased. The guide objects may fall below or to the right of the
        # grid or there may be gaps in numbering, so we set the center label
        # of bad centers to 0.
        #
        centers[:, bad_centers] = 0
        lcenters = masked_labels[centers[0, :], centers[1, :]]
        lcenters[bad_centers] = 0
        #
        # Use the guide labels to look up the corresponding center for
        # each guide object pixel. Mask out guide labels that don't match
        # centers.
        #
        mask = np.zeros(guide_labels.shape, bool)
        ii_labels = np.index_exp[0:labels.shape[0], 0:labels.shape[1]]
        mask[ii_labels] = lcenters[guide_labels[ii_labels]] != labels
        mask[guide_labels == 0] = True
        mask[lcenters[guide_labels] == 0] = True
        filtered_guide_labels = guide_labels.copy()
        filtered_guide_labels[mask] = 0
        return filtered_guide_labels
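Both copies above use the same indexing pattern: centers_of_labels returns a (2, nlabels) array of (i, j) centroids for labels 1..nlabels, and padding it with a leading column turns raw label values into valid column indices. A minimal, self-contained sketch of that pattern (scipy.ndimage.center_of_mass stands in for centrosome's centers_of_labels, so the snippet runs without CellProfiler):

import numpy as np
from scipy import ndimage

labels = np.zeros((6, 6), int)
labels[1:3, 1:3] = 1          # object 1
labels[4:6, 3:6] = 2          # object 2

def centers_of_labels_standin(lbl):
    # stand-in for centrosome.cpmorphology.centers_of_labels:
    # (2, nlabels) array of (i, j) centroids for labels 1..nlabels
    return np.array(ndimage.center_of_mass(
        np.ones_like(lbl), lbl, np.arange(1, lbl.max() + 1))).T

# Pad with a leading column so a label value indexes its own centroid
# (column 0 stands for the background label 0).
centers = np.zeros((2, labels.max() + 1))
centers[:, 1:] = centers_of_labels_standin(labels)
centers = np.round(centers).astype(int)
print(centers[:, labels[1, 1]])   # (i, j) centroid of the object under pixel (1, 1)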
Example #3
 def calculate_centroid_distances(self, workspace, parent_name):
     '''Calculate the centroid-centroid distance between parent & child'''
     meas = workspace.measurements
     assert isinstance(meas,cpmeas.Measurements)
     sub_object_name = self.sub_object_name.value
     parents = workspace.object_set.get_objects(parent_name)
     children = workspace.object_set.get_objects(sub_object_name)
     parents_of = self.get_parents_of(workspace, parent_name)
     pcenters = centers_of_labels(parents.segmented).transpose()
     ccenters = centers_of_labels(children.segmented).transpose()
     if pcenters.shape[0] == 0 or ccenters.shape[0] == 0:
         dist = np.array([np.NaN] * len(parents_of))
     else:
         #
         # Make indexing of parents_of be same as pcenters
         #
         parents_of = parents_of - 1
         mask = (parents_of != -1) | (parents_of > pcenters.shape[0])
         dist = np.array([np.NaN] * ccenters.shape[0])
         dist[mask] = np.sqrt(np.sum((ccenters[mask,:] - 
                                      pcenters[parents_of[mask],:])**2,1))
     meas.add_measurement(sub_object_name, FF_CENTROID % parent_name, dist)
Example #4
 def calculate_centroid_distances(self, workspace, parent_name):
     '''Calculate the centroid-centroid distance between parent & child'''
     meas = workspace.measurements
     assert isinstance(meas, cpmeas.Measurements)
     sub_object_name = self.sub_object_name.value
     parents = workspace.object_set.get_objects(parent_name)
     children = workspace.object_set.get_objects(sub_object_name)
     parents_of = self.get_parents_of(workspace, parent_name)
     pcenters = centers_of_labels(parents.segmented).transpose()
     ccenters = centers_of_labels(children.segmented).transpose()
     if pcenters.shape[0] == 0 or ccenters.shape[0] == 0:
         dist = np.array([np.NaN] * len(parents_of))
     else:
         #
         # Make indexing of parents_of be same as pcenters
         #
         parents_of = parents_of - 1
         mask = (parents_of != -1) | (parents_of > pcenters.shape[0])
         dist = np.array([np.NaN] * ccenters.shape[0])
         dist[mask] = np.sqrt(
             np.sum((ccenters[mask, :] - pcenters[parents_of[mask], :])**2,
                    1))
     meas.add_measurement(sub_object_name, FF_CENTROID % parent_name, dist)
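The two copies above compute the child-to-parent centroid distance; the essential steps are masking out orphan children (parent number 0) and shifting the 1-based parent numbers to 0-based rows of pcenters. A toy version with made-up coordinates:

import numpy as np

pcenters = np.array([[2.0, 2.0], [8.0, 8.0]])              # one (i, j) row per parent
ccenters = np.array([[2.0, 3.0], [7.0, 8.0], [5.0, 5.0]])  # one (i, j) row per child
parents_of = np.array([1, 2, 0])                           # 0 means "no parent"

mask = parents_of > 0
dist = np.full(len(parents_of), np.nan)
dist[mask] = np.sqrt(np.sum(
    (ccenters[mask] - pcenters[parents_of[mask] - 1]) ** 2, 1))
print(dist)   # [ 1.  1. nan]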
Example #5
    def run_automatic(self, workspace):
        """Automatically define a grid based on objects

        Returns a CPGridInfo object
        """
        objects = workspace.object_set.get_objects(self.object_name.value)
        centroids = centers_of_labels(objects.segmented)
        try:
            if centroids.shape[1] < 2:
                #
                # Failed if too few objects
                #
                raise RuntimeError("%s has too few grid cells" %
                                   self.object_name.value)
            #
            # Artificially swap these to match the user's orientation
            #
            first_row, second_row = (1, self.grid_rows.value)
            if self.origin in (NUM_BOTTOM_LEFT, NUM_BOTTOM_RIGHT):
                first_row, second_row = (second_row, first_row)
            first_column, second_column = (1, self.grid_columns.value)
            if self.origin in (NUM_TOP_RIGHT, NUM_BOTTOM_RIGHT):
                first_column, second_column = (second_column, first_column)
            first_x = np.min(centroids[1, :])
            first_y = np.min(centroids[0, :])
            second_x = np.max(centroids[1, :])
            second_y = np.max(centroids[0, :])
            result = self.build_grid_info(
                first_x,
                first_y,
                first_row,
                first_column,
                second_x,
                second_y,
                second_row,
                second_column,
                objects.segmented.shape,
            )
        except Exception:
            if self.failed_grid_choice != FAIL_NO:
                result = self.get_good_gridding(workspace)
                if result is None:
                    raise RuntimeError(
                        "%s has too few grid cells and there is no previous successful grid"
                        % self.object_name.value)
            raise
        return result
Example #6
 def run_natural_circle(self, workspace, gridding):
     '''Return a labels matrix composed of circles found from objects'''
     #
     # Find the centroid of any guide label in a grid
     #
     guide_label = self.filtered_labels(workspace, gridding)
     labels = self.fill_grid(workspace, gridding)
     labels[guide_label[0:labels.shape[0], 0:labels.shape[1]] == 0] = 0
     centers_i, centers_j = centers_of_labels(labels)
     nmissing = np.max(gridding.spot_table) - len(centers_i)
     if nmissing > 0:
         centers_i = np.hstack((centers_i, [np.NaN] * nmissing))
         centers_j = np.hstack((centers_j, [np.NaN] * nmissing))
     #
     # Broadcast these using the spot table
     #
     centers_i = centers_i[gridding.spot_table - 1]
     centers_j = centers_j[gridding.spot_table - 1]
     return self.run_circle(workspace, gridding, centers_i, centers_j)
Example #7
 def run_natural_circle(self, workspace, gridding):
     """Return a labels matrix composed of circles found from objects"""
     #
     # Find the centroid of any guide label in a grid
     #
     guide_label = self.filtered_labels(workspace, gridding)
     labels = self.fill_grid(workspace, gridding)
     labels[guide_label[0:labels.shape[0], 0:labels.shape[1]] == 0] = 0
     centers_i, centers_j = centers_of_labels(labels)
     nmissing = numpy.max(gridding.spot_table) - len(centers_i)
     if nmissing > 0:
         centers_i = numpy.hstack((centers_i, [numpy.NaN] * nmissing))
         centers_j = numpy.hstack((centers_j, [numpy.NaN] * nmissing))
     #
     # Broadcast these using the spot table
     #
     centers_i = centers_i[gridding.spot_table - 1]
     centers_j = centers_j[gridding.spot_table - 1]
     return self.run_circle(workspace, gridding, centers_i, centers_j)
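Both versions of run_natural_circle pad the per-spot centroids with NaN for grid cells that have no guide object and then broadcast them over the grid by fancy indexing with spot_table. The indexing step in isolation (spot_table is assumed to hold 1-based spot numbers):

import numpy as np

spot_table = np.array([[1, 2],
                       [3, 4]])            # 1-based spot numbers per grid cell
centers_i = np.array([10.0, 20.0, 30.0])   # centroids found for spots 1..3
nmissing = spot_table.max() - len(centers_i)
if nmissing > 0:
    centers_i = np.hstack((centers_i, [np.nan] * nmissing))
# one centroid per grid cell, NaN where no object was found
print(centers_i[spot_table - 1])
# [[10. 20.]
#  [30. nan]]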
Example #8
    def make_workspace(self, measurement, labels=None, image=None):
        object_set = cpo.ObjectSet()
        module = D.DisplayDataOnImage()
        module.module_num = 1
        module.image_name.value = INPUT_IMAGE_NAME
        module.display_image.value = OUTPUT_IMAGE_NAME
        module.objects_name.value = OBJECTS_NAME
        m = cpmeas.Measurements()

        if labels is None:
            module.objects_or_image.value = D.OI_IMAGE
            m.add_image_measurement(MEASUREMENT_NAME, measurement)
            if image is None:
                image = np.zeros((50, 120))
        else:
            module.objects_or_image.value = D.OI_OBJECTS
            o = cpo.Objects()
            o.segmented = labels
            object_set.add_objects(o, OBJECTS_NAME)
            m.add_measurement(OBJECTS_NAME, MEASUREMENT_NAME,
                              np.array(measurement))
            y, x = centers_of_labels(labels)
            m.add_measurement(OBJECTS_NAME, "Location_Center_X", x)
            m.add_measurement(OBJECTS_NAME, "Location_Center_Y", y)
            if image is None:
                image = np.zeros(labels.shape)
        module.measurement.value = MEASUREMENT_NAME

        pipeline = cpp.Pipeline()

        def callback(caller, event):
            self.assertFalse(isinstance(event, cpp.RunExceptionEvent))

        pipeline.add_listener(callback)
        pipeline.add_module(module)
        image_set_list = cpi.ImageSetList()
        image_set = image_set_list.get_image_set(0)
        image_set.add(INPUT_IMAGE_NAME, cpi.Image(image))

        workspace = cpw.Workspace(pipeline, module, image_set, object_set, m,
                                  image_set_list)
        return workspace, module
Example #9
    def make_workspace(self, measurement, labels=None, image=None):
        object_set = cpo.ObjectSet()
        module = D.DisplayDataOnImage()
        module.module_num = 1
        module.image_name.value = INPUT_IMAGE_NAME
        module.display_image.value = OUTPUT_IMAGE_NAME
        module.objects_name.value = OBJECTS_NAME
        m = cpmeas.Measurements()

        if labels is None:
            module.objects_or_image.value = D.OI_IMAGE
            m.add_image_measurement(MEASUREMENT_NAME, measurement)
            if image is None:
                image = np.zeros((50, 120))
        else:
            module.objects_or_image.value = D.OI_OBJECTS
            o = cpo.Objects()
            o.segmented = labels
            object_set.add_objects(o, OBJECTS_NAME)
            m.add_measurement(OBJECTS_NAME, MEASUREMENT_NAME, np.array(measurement))
            y, x = centers_of_labels(labels)
            m.add_measurement(OBJECTS_NAME, "Location_Center_X", x)
            m.add_measurement(OBJECTS_NAME, "Location_Center_Y", y)
            if image is None:
                image = np.zeros(labels.shape)
        module.measurement.value = MEASUREMENT_NAME

        pipeline = cpp.Pipeline()

        def callback(caller, event):
            self.assertFalse(isinstance(event, cpp.RunExceptionEvent))

        pipeline.add_listener(callback)
        pipeline.add_module(module)
        image_set_list = cpi.ImageSetList()
        image_set = image_set_list.get_image_set(0)
        image_set.add(INPUT_IMAGE_NAME, cpi.Image(image))

        workspace = cpw.Workspace(pipeline, module, image_set, object_set,
                                  m, image_set_list)
        return workspace, module
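Note the axis convention the tests rely on: centers_of_labels returns (row, column) pairs, so the first row feeds Location_Center_Y and the second Location_Center_X. A quick check of that convention (scipy.ndimage.center_of_mass again standing in for centers_of_labels):

import numpy as np
from scipy import ndimage

labels = np.zeros((4, 8), int)
labels[1, 5] = 1     # single-pixel object at row 1, column 5
y, x = np.array(ndimage.center_of_mass(np.ones_like(labels), labels, [1])).T
print(y, x)          # [1.] [5.]  -> Center_Y = row, Center_X = column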
Example #10
    def run_automatic(self, workspace):
        '''Automatically define a grid based on objects

        Returns a CPGridInfo object
        '''
        objects = workspace.object_set.get_objects(self.object_name.value)
        centroids = centers_of_labels(objects.segmented)
        try:
            if centroids.shape[1] < 2:
                #
                # Failed if too few objects
                #
                raise RuntimeError("%s has too few grid cells" %
                                   self.object_name.value)
            #
            # Artificially swap these to match the user's orientation
            #
            first_row, second_row = (1, self.grid_rows.value)
            if self.origin in (NUM_BOTTOM_LEFT, NUM_BOTTOM_RIGHT):
                first_row, second_row = (second_row, first_row)
            first_column, second_column = (1, self.grid_columns.value)
            if self.origin in (NUM_TOP_RIGHT, NUM_BOTTOM_RIGHT):
                first_column, second_column = (second_column, first_column)
            first_x = np.min(centroids[1, :])
            first_y = np.min(centroids[0, :])
            second_x = np.max(centroids[1, :])
            second_y = np.max(centroids[0, :])
            result = self.build_grid_info(first_x, first_y, first_row,
                                          first_column, second_x, second_y,
                                          second_row, second_column,
                                          objects.segmented.shape)
        except Exception:
            if self.failed_grid_choice != FAIL_NO:
                result = self.get_good_gridding(workspace)
                if result is None:
                    raise RuntimeError("%s has too few grid cells and there is no previous successful grid" %
                                       self.object_name.value)
            raise
        return result
Example #11
 def calculate_minimum_distances(self, workspace, parent_name):
     '''Calculate the distance from child center to parent perimeter'''
     meas = workspace.measurements
     assert isinstance(meas,cpmeas.Measurements)
     sub_object_name = self.sub_object_name.value
     parents = workspace.object_set.get_objects(parent_name)
     children = workspace.object_set.get_objects(sub_object_name)
     parents_of = self.get_parents_of(workspace, parent_name)
     if len(parents_of) == 0:
         dist = np.zeros((0,))
     elif np.all(parents_of == 0):
         dist = np.array([np.NaN] * len(parents_of))
     else:
         mask = parents_of > 0
         ccenters = centers_of_labels(children.segmented).transpose()
         ccenters = ccenters[mask,:]
         parents_of_masked = parents_of[mask] - 1
         pperim = outline(parents.segmented)
         #
         # Get a list of all points on the perimeter
         #
         perim_loc = np.argwhere(pperim != 0)
         #
         # Get the label # for each point
         #
         perim_idx = pperim[perim_loc[:,0],perim_loc[:,1]]
         #
         # Sort the points by label #
         #
         idx = np.lexsort((perim_loc[:,1],perim_loc[:,0],perim_idx))
         perim_loc = perim_loc[idx,:]
         perim_idx = perim_idx[idx]
         #
         # Get counts and indexes to each run of perimeter points
         #
         counts = fix(scind.sum(np.ones(len(perim_idx)),perim_idx,
                                np.arange(1,perim_idx[-1]+1))).astype(np.int32)
         indexes = np.cumsum(counts) - counts
         #
         # For the children, get the index and count of the parent
         #
         ccounts = counts[parents_of_masked]
         cindexes = indexes[parents_of_masked]
         #
         # Now make an array that has an element for each of that child's
         # perimeter points
         #
         clabel = np.zeros(np.sum(ccounts), int)
         #
         # cfirst is the eventual first index of each child in the
         # clabel array
         #
         cfirst = np.cumsum(ccounts) - ccounts
         clabel[cfirst[1:]] += 1
         clabel = np.cumsum(clabel)
         #
         # Make an index that runs from 0 to ccounts for each
         # child label.
         #
         cp_index = np.arange(len(clabel)) - cfirst[clabel]
         #
         # then add cindexes to get an index to the perimeter point
         #
         cp_index += cindexes[clabel]
         #
         # Now, calculate the distance from the centroid of each label
         # to each perimeter point in the parent.
         #
         dist = np.sqrt(np.sum((perim_loc[cp_index,:] - 
                                ccenters[clabel,:])**2,1))
         #
         # Finally, find the minimum distance per child
         #
         min_dist = fix(scind.minimum(dist, clabel, np.arange(len(ccounts))))
         #
         # Account for unparented children
         #
         dist = np.array([np.NaN] * len(mask))
         dist[mask] = min_dist
     meas.add_measurement(sub_object_name, FF_MINIMUM % parent_name, dist)
Example #12
 def calculate_minimum_distances(self, workspace, parent_name):
     '''Calculate the distance from child center to parent perimeter'''
     meas = workspace.measurements
     assert isinstance(meas, cpmeas.Measurements)
     sub_object_name = self.sub_object_name.value
     parents = workspace.object_set.get_objects(parent_name)
     children = workspace.object_set.get_objects(sub_object_name)
     parents_of = self.get_parents_of(workspace, parent_name)
     if len(parents_of) == 0:
         dist = np.zeros((0, ))
     elif np.all(parents_of == 0):
         dist = np.array([np.NaN] * len(parents_of))
     else:
         mask = parents_of > 0
         ccenters = centers_of_labels(children.segmented).transpose()
         ccenters = ccenters[mask, :]
         parents_of_masked = parents_of[mask] - 1
         pperim = outline(parents.segmented)
         #
         # Get a list of all points on the perimeter
         #
         perim_loc = np.argwhere(pperim != 0)
         #
         # Get the label # for each point
         #
         perim_idx = pperim[perim_loc[:, 0], perim_loc[:, 1]]
         #
         # Sort the points by label #
         #
         idx = np.lexsort((perim_loc[:, 1], perim_loc[:, 0], perim_idx))
         perim_loc = perim_loc[idx, :]
         perim_idx = perim_idx[idx]
         #
         # Get counts and indexes to each run of perimeter points
         #
         counts = fix(
             scind.sum(np.ones(len(perim_idx)), perim_idx,
                       np.arange(1, perim_idx[-1] + 1))).astype(np.int32)
         indexes = np.cumsum(counts) - counts
         #
         # For the children, get the index and count of the parent
         #
         ccounts = counts[parents_of_masked]
         cindexes = indexes[parents_of_masked]
         #
         # Now make an array that has an element for each of that child's
         # perimeter points
         #
         clabel = np.zeros(np.sum(ccounts), int)
         #
         # cfirst is the eventual first index of each child in the
         # clabel array
         #
         cfirst = np.cumsum(ccounts) - ccounts
         clabel[cfirst[1:]] += 1
         clabel = np.cumsum(clabel)
         #
         # Make an index that runs from 0 to ccounts for each
         # child label.
         #
         cp_index = np.arange(len(clabel)) - cfirst[clabel]
         #
         # then add cindexes to get an index to the perimeter point
         #
         cp_index += cindexes[clabel]
         #
         # Now, calculate the distance from the centroid of each label
         # to each perimeter point in the parent.
         #
         dist = np.sqrt(
             np.sum((perim_loc[cp_index, :] - ccenters[clabel, :])**2, 1))
         #
         # Finally, find the minimum distance per child
         #
         min_dist = fix(scind.minimum(dist, clabel,
                                      np.arange(len(ccounts))))
         #
         # Account for unparented children
         #
         dist = np.array([np.NaN] * len(mask))
         dist[mask] = min_dist
     meas.add_measurement(sub_object_name, FF_MINIMUM % parent_name, dist)
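The cumsum bookkeeping above (cfirst, clabel, cp_index) expands the per-child perimeter counts into one row per (child, perimeter point) pair so the distance and scipy.ndimage.minimum calls can run fully vectorised. The same trick in isolation, with illustrative counts (children 0 and 1 share a parent here):

import numpy as np

ccounts  = np.array([3, 3, 4])          # perimeter points of each child's parent
cindexes = np.array([0, 0, 3])          # start of each parent's run in perim_loc

clabel = np.zeros(ccounts.sum(), int)   # child index for every expanded row
cfirst = np.cumsum(ccounts) - ccounts   # first expanded row of each child
clabel[cfirst[1:]] += 1
clabel = np.cumsum(clabel)

cp_index = np.arange(len(clabel)) - cfirst[clabel]   # 0..count-1 within each child
cp_index += cindexes[clabel]            # perimeter point to compare against
print(clabel)                           # [0 0 0 1 1 1 2 2 2 2]
print(cp_index)                         # [0 1 2 0 1 2 3 4 5 6]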
Example #13
    def do_measurements(self, workspace, image_name, object_name,
                        center_object_name, center_choice,
                        bin_count_settings, dd):
        '''Perform the radial measurements on the image set

        workspace - workspace that holds images / objects
        image_name - make measurements on this image
        object_name - make measurements on these objects
        center_object_name - use the centers of these related objects as
                      the centers for radial measurements. None to use the
                      objects themselves.
        center_choice - the user's center choice for this object:
                      C_SELF, C_CENTERS_OF_OBJECTS or C_EDGES_OF_OBJECTS.
        bin_count_settings - the bin count settings group
        dd - a dictionary for saving reusable partial results

        returns one statistics tuple per ring.
        '''
        assert isinstance(workspace, cpw.Workspace)
        assert isinstance(workspace.object_set, cpo.ObjectSet)
        bin_count = bin_count_settings.bin_count.value
        wants_scaled = bin_count_settings.wants_scaled.value
        maximum_radius = bin_count_settings.maximum_radius.value

        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        labels, pixel_data = cpo.crop_labels_and_image(objects.segmented,
                                                       image.pixel_data)
        nobjects = np.max(objects.segmented)
        measurements = workspace.measurements
        assert isinstance(measurements, cpmeas.Measurements)
        heatmaps = {}
        for heatmap in self.heatmaps:
            if heatmap.object_name.get_objects_name() == object_name and \
                            image_name == heatmap.image_name.get_image_name() and \
                            heatmap.get_number_of_bins() == bin_count:
                dd[id(heatmap)] = \
                    heatmaps[MEASUREMENT_ALIASES[heatmap.measurement.value]] = \
                    np.zeros(labels.shape)
        if nobjects == 0:
            for bin in range(1, bin_count + 1):
                for feature in (F_FRAC_AT_D, F_MEAN_FRAC, F_RADIAL_CV):
                    feature_name = (
                        (feature + FF_GENERIC) % (image_name, bin, bin_count))
                    measurements.add_measurement(
                            object_name, "_".join([M_CATEGORY, feature_name]),
                            np.zeros(0))
                    if not wants_scaled:
                        measurement_name = "_".join([M_CATEGORY, feature,
                                                     image_name, FF_OVERFLOW])
                        measurements.add_measurement(
                                object_name, measurement_name, np.zeros(0))
            return [(image_name, object_name, "no objects", "-", "-", "-", "-")]
        name = (object_name if center_object_name is None
                else "%s_%s" % (object_name, center_object_name))
        if name in dd:
            normalized_distance, i_center, j_center, good_mask = dd[name]
        else:
            d_to_edge = distance_to_edge(labels)
            if center_object_name is not None:
                #
                # Use the center of the centering objects to assign a center
                # to each labeled pixel using propagation
                #
                center_objects = workspace.object_set.get_objects(center_object_name)
                center_labels, cmask = cpo.size_similarly(
                        labels, center_objects.segmented)
                pixel_counts = fix(scind.sum(
                        np.ones(center_labels.shape),
                        center_labels,
                        np.arange(1, np.max(center_labels) + 1, dtype=np.int32)))
                good = pixel_counts > 0
                i, j = (centers_of_labels(center_labels) + .5).astype(int)
                ig = i[good]
                jg = j[good]
                lg = np.arange(1, len(i) + 1)[good]
                if center_choice == C_CENTERS_OF_OTHER:
                    #
                    # Reduce the propagation labels to the centers of
                    # the centering objects
                    #
                    center_labels = np.zeros(center_labels.shape, int)
                    center_labels[ig, jg] = lg
                cl, d_from_center = propagate(np.zeros(center_labels.shape),
                                              center_labels,
                                              labels != 0, 1)
                #
                # Erase the centers that fall outside of labels
                #
                cl[labels == 0] = 0
                #
                # If objects are hollow or crescent-shaped, there may be
                # objects without center labels. As a backup, find the
                # center that is the closest to the center of mass.
                #
                missing_mask = (labels != 0) & (cl == 0)
                missing_labels = np.unique(labels[missing_mask])
                if len(missing_labels):
                    all_centers = centers_of_labels(labels)
                    missing_i_centers, missing_j_centers = \
                        all_centers[:, missing_labels - 1]
                    di = missing_i_centers[:, np.newaxis] - ig[np.newaxis, :]
                    dj = missing_j_centers[:, np.newaxis] - jg[np.newaxis, :]
                    missing_best = lg[np.argmin(di * di + dj * dj, 1)]
                    best = np.zeros(np.max(labels) + 1, int)
                    best[missing_labels] = missing_best
                    cl[missing_mask] = best[labels[missing_mask]]
                    #
                    # Now compute the crow-flies distance to the centers
                    # of these pixels from whatever center was assigned to
                    # the object.
                    #
                    iii, jjj = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
                    di = iii[missing_mask] - i[cl[missing_mask] - 1]
                    dj = jjj[missing_mask] - j[cl[missing_mask] - 1]
                    d_from_center[missing_mask] = np.sqrt(di * di + dj * dj)
            else:
                # Find the point in each object farthest away from the edge.
                # This does better than the centroid:
                # * The center is within the object
                # * The center tends to be an interesting point, like the
                #   center of the nucleus or the center of one or the other
                #   of two touching cells.
                #
                i, j = maximum_position_of_labels(d_to_edge, labels, objects.indices)
                center_labels = np.zeros(labels.shape, int)
                center_labels[i, j] = labels[i, j]
                #
                # Use the coloring trick here to process touching objects
                # in separate operations
                #
                colors = color_labels(labels)
                ncolors = np.max(colors)
                d_from_center = np.zeros(labels.shape)
                cl = np.zeros(labels.shape, int)
                for color in range(1, ncolors + 1):
                    mask = colors == color
                    l, d = propagate(np.zeros(center_labels.shape),
                                     center_labels,
                                     mask, 1)
                    d_from_center[mask] = d[mask]
                    cl[mask] = l[mask]
            good_mask = cl > 0
            if center_choice == C_EDGES_OF_OTHER:
                # Exclude pixels within the centering objects
                # when performing calculations from the centers
                good_mask = good_mask & (center_labels == 0)
            i_center = np.zeros(cl.shape)
            i_center[good_mask] = i[cl[good_mask] - 1]
            j_center = np.zeros(cl.shape)
            j_center[good_mask] = j[cl[good_mask] - 1]

            normalized_distance = np.zeros(labels.shape)
            if wants_scaled:
                total_distance = d_from_center + d_to_edge
                normalized_distance[good_mask] = (d_from_center[good_mask] /
                                                  (total_distance[good_mask] + .001))
            else:
                normalized_distance[good_mask] = \
                    d_from_center[good_mask] / maximum_radius
            dd[name] = [normalized_distance, i_center, j_center, good_mask]
        ngood_pixels = np.sum(good_mask)
        good_labels = labels[good_mask]
        bin_indexes = (normalized_distance * bin_count).astype(int)
        bin_indexes[bin_indexes > bin_count] = bin_count
        labels_and_bins = (good_labels - 1, bin_indexes[good_mask])
        histogram = coo_matrix((pixel_data[good_mask], labels_and_bins),
                               (nobjects, bin_count + 1)).toarray()
        sum_by_object = np.sum(histogram, 1)
        sum_by_object_per_bin = np.dstack([sum_by_object] * (bin_count + 1))[0]
        fraction_at_distance = histogram / sum_by_object_per_bin
        number_at_distance = coo_matrix((np.ones(ngood_pixels), labels_and_bins),
                                        (nobjects, bin_count + 1)).toarray()
        object_mask = number_at_distance > 0
        sum_by_object = np.sum(number_at_distance, 1)
        sum_by_object_per_bin = np.dstack([sum_by_object] * (bin_count + 1))[0]
        fraction_at_bin = number_at_distance / sum_by_object_per_bin
        mean_pixel_fraction = fraction_at_distance / (fraction_at_bin +
                                                      np.finfo(float).eps)
        masked_fraction_at_distance = masked_array(fraction_at_distance,
                                                   ~object_mask)
        masked_mean_pixel_fraction = masked_array(mean_pixel_fraction,
                                                  ~object_mask)
        # Anisotropy calculation.  Split each cell into eight wedges, then
        # compute coefficient of variation of the wedges' mean intensities
        # in each ring.
        #
        # Compute each pixel's delta from the center object's centroid
        i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
        imask = i[good_mask] > i_center[good_mask]
        jmask = j[good_mask] > j_center[good_mask]
        absmask = (abs(i[good_mask] - i_center[good_mask]) >
                   abs(j[good_mask] - j_center[good_mask]))
        radial_index = (imask.astype(int) + jmask.astype(int) * 2 +
                        absmask.astype(int) * 4)
        statistics = []

        for bin in range(bin_count + (0 if wants_scaled else 1)):
            bin_mask = (good_mask & (bin_indexes == bin))
            bin_pixels = np.sum(bin_mask)
            bin_labels = labels[bin_mask]
            bin_radial_index = radial_index[bin_indexes[good_mask] == bin]
            labels_and_radii = (bin_labels - 1, bin_radial_index)
            radial_values = coo_matrix((pixel_data[bin_mask],
                                        labels_and_radii),
                                       (nobjects, 8)).toarray()
            pixel_count = coo_matrix((np.ones(bin_pixels), labels_and_radii),
                                     (nobjects, 8)).toarray()
            mask = pixel_count == 0
            radial_means = masked_array(radial_values / pixel_count, mask)
            radial_cv = np.std(radial_means, 1) / np.mean(radial_means, 1)
            radial_cv[np.sum(~mask, 1) == 0] = 0
            for measurement, feature, overflow_feature in (
                    (fraction_at_distance[:, bin], MF_FRAC_AT_D, OF_FRAC_AT_D),
                    (mean_pixel_fraction[:, bin], MF_MEAN_FRAC, OF_MEAN_FRAC),
                    (np.array(radial_cv), MF_RADIAL_CV, OF_RADIAL_CV)):

                if bin == bin_count:
                    measurement_name = overflow_feature % image_name
                else:
                    measurement_name = feature % (image_name, bin + 1, bin_count)
                measurements.add_measurement(object_name,
                                             measurement_name,
                                             measurement)
                if feature in heatmaps:
                    heatmaps[feature][bin_mask] = measurement[bin_labels - 1]
            radial_cv.mask = np.sum(~mask, 1) == 0
            bin_name = str(bin + 1) if bin < bin_count else "Overflow"
            statistics += [(image_name, object_name, bin_name, str(bin_count),
                            round(np.mean(masked_fraction_at_distance[:, bin]), 4),
                            round(np.mean(masked_mean_pixel_fraction[:, bin]), 4),
                            round(np.mean(radial_cv), 4))]
        return statistics
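The per-object, per-bin sums above are accumulated with scipy.sparse.coo_matrix, which adds duplicate (row, column) entries together when the matrix is materialised. A minimal sketch of that histogramming trick:

import numpy as np
from scipy.sparse import coo_matrix

# five pixels: three belong to object 0, two to object 1, spread over two bins
pixel_values = np.array([0.2, 0.3, 0.5, 1.0, 1.0])
object_index = np.array([0,   0,   0,   1,   1])
bin_index    = np.array([0,   0,   1,   0,   1])

histogram = coo_matrix((pixel_values, (object_index, bin_index)),
                       shape=(2, 2)).toarray()
print(histogram)
# [[0.5 0.5]
#  [1.  1. ]]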
Example #14
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)
        assert isinstance(objects, cpo.Objects)
        has_pixels = objects.areas > 0
        labels = objects.small_removed_segmented
        kept_labels = objects.segmented
        neighbor_objects = workspace.object_set.get_objects(
            self.neighbors_name.value)
        assert isinstance(neighbor_objects, cpo.Objects)
        neighbor_labels = neighbor_objects.small_removed_segmented
        #
        # Need to add in labels touching border.
        #
        unedited_segmented = neighbor_objects.unedited_segmented
        touching_border = np.zeros(np.max(unedited_segmented) + 1, bool)
        touching_border[unedited_segmented[0, :]] = True
        touching_border[unedited_segmented[-1, :]] = True
        touching_border[unedited_segmented[:, 0]] = True
        touching_border[unedited_segmented[:, -1]] = True
        touching_border[0] = False
        touching_border_mask = touching_border[unedited_segmented]
        nobjects = np.max(labels)
        nkept_objects = objects.count
        nneighbors = np.max(neighbor_labels)
        if np.any(touching_border) and \
                np.all(~ touching_border_mask[neighbor_labels != 0]):
            # Add the border labels if any were excluded
            touching_border_object_number = np.cumsum(touching_border) + \
                                            np.max(neighbor_labels)
            touching_border_mask = touching_border_mask & (neighbor_labels
                                                           == 0)
            neighbor_labels = neighbor_labels.copy().astype(np.int32)
            neighbor_labels[
                touching_border_mask] = touching_border_object_number[
                    unedited_segmented[touching_border_mask]]

        _, object_numbers = objects.relate_labels(labels, kept_labels)
        if self.neighbors_are_objects:
            neighbor_numbers = object_numbers
            neighbor_has_pixels = has_pixels
        else:
            _, neighbor_numbers = neighbor_objects.relate_labels(
                neighbor_labels, neighbor_objects.segmented)
            neighbor_has_pixels = np.bincount(neighbor_labels.ravel())[1:] > 0
        neighbor_count = np.zeros((nobjects, ))
        pixel_count = np.zeros((nobjects, ))
        first_object_number = np.zeros((nobjects, ), int)
        second_object_number = np.zeros((nobjects, ), int)
        first_x_vector = np.zeros((nobjects, ))
        second_x_vector = np.zeros((nobjects, ))
        first_y_vector = np.zeros((nobjects, ))
        second_y_vector = np.zeros((nobjects, ))
        angle = np.zeros((nobjects, ))
        percent_touching = np.zeros((nobjects, ))
        expanded_labels = None
        if self.distance_method == D_EXPAND:
            # Find the i,j coordinates of the nearest foreground point
            # to every background point
            i, j = scind.distance_transform_edt(labels == 0,
                                                return_distances=False,
                                                return_indices=True)
            # Assign each background pixel to the label of its nearest
            # foreground pixel. Assign label to label for foreground.
            labels = labels[i, j]
            expanded_labels = labels  # for display
            distance = 1  # dilate once to make touching edges overlap
            scale = S_EXPANDED
            if self.neighbors_are_objects:
                neighbor_labels = labels.copy()
        elif self.distance_method == D_WITHIN:
            distance = self.distance.value
            scale = str(distance)
        elif self.distance_method == D_ADJACENT:
            distance = 1
            scale = S_ADJACENT
        else:
            raise ValueError("Unknown distance method: %s" %
                             self.distance_method.value)
        if nneighbors > (1 if self.neighbors_are_objects else 0):
            first_objects = []
            second_objects = []
            object_indexes = np.arange(nobjects, dtype=np.int32) + 1
            #
            # First, compute the first and second nearest neighbors,
            # and the angles between self and the first and second
            # nearest neighbors
            #
            ocenters = centers_of_labels(
                objects.small_removed_segmented).transpose()
            ncenters = centers_of_labels(
                neighbor_objects.small_removed_segmented).transpose()
            areas = fix(
                scind.sum(np.ones(labels.shape), labels, object_indexes))
            perimeter_outlines = outline(labels)
            perimeters = fix(
                scind.sum(np.ones(labels.shape), perimeter_outlines,
                          object_indexes))

            i, j = np.mgrid[0:nobjects, 0:nneighbors]
            distance_matrix = np.sqrt((ocenters[i, 0] - ncenters[j, 0])**2 +
                                      (ocenters[i, 1] - ncenters[j, 1])**2)
            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            if distance_matrix.shape[1] == 1:
                # a little buggy, lexsort assumes that a 2-d array of
                # second dimension = 1 is a 1-d array
                order = np.zeros(distance_matrix.shape, int)
            else:
                order = np.lexsort([distance_matrix])
            first_neighbor = 1 if self.neighbors_are_objects else 0
            first_object_index = order[:, first_neighbor]
            first_x_vector = ncenters[first_object_index, 1] - ocenters[:, 1]
            first_y_vector = ncenters[first_object_index, 0] - ocenters[:, 0]
            if nneighbors > first_neighbor + 1:
                second_object_index = order[:, first_neighbor + 1]
                second_x_vector = ncenters[second_object_index,
                                           1] - ocenters[:, 1]
                second_y_vector = ncenters[second_object_index,
                                           0] - ocenters[:, 0]
                v1 = np.array((first_x_vector, first_y_vector))
                v2 = np.array((second_x_vector, second_y_vector))
                #
                # Project the unit vector v1 against the unit vector v2
                #
                dot = (np.sum(v1 * v2, 0) /
                       np.sqrt(np.sum(v1**2, 0) * np.sum(v2**2, 0)))
                angle = np.arccos(dot) * 180. / np.pi

            # Make the structuring element for dilation
            strel = strel_disk(distance)
            #
            # A little bigger one to enter into the border with a structure
            # that mimics the one used to create the outline
            #
            strel_touching = strel_disk(distance + .5)
            #
            # Get the extents for each object and calculate the patch
            # that excises the part of the image that is "distance"
            # away
            i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
            min_i, max_i, min_i_pos, max_i_pos = \
                scind.extrema(i, labels, object_indexes)
            min_j, max_j, min_j_pos, max_j_pos = \
                scind.extrema(j, labels, object_indexes)
            min_i = np.maximum(fix(min_i) - distance, 0).astype(int)
            max_i = np.minimum(fix(max_i) + distance + 1,
                               labels.shape[0]).astype(int)
            min_j = np.maximum(fix(min_j) - distance, 0).astype(int)
            max_j = np.minimum(fix(max_j) + distance + 1,
                               labels.shape[1]).astype(int)
            #
            # Loop over all objects
            # Calculate which ones overlap "index"
            # Calculate how much overlap there is of others to "index"
            #
            for object_number in object_numbers:
                if object_number == 0:
                    #
                    # No corresponding object in small-removed. This means
                    # that the object has no pixels, e.g. not renumbered.
                    #
                    continue
                index = object_number - 1
                patch = labels[min_i[index]:max_i[index],
                               min_j[index]:max_j[index]]
                npatch = neighbor_labels[min_i[index]:max_i[index],
                                         min_j[index]:max_j[index]]
                #
                # Find the neighbors
                #
                patch_mask = patch == (index + 1)
                extended = scind.binary_dilation(patch_mask, strel)
                neighbors = np.unique(npatch[extended])
                neighbors = neighbors[neighbors != 0]
                if self.neighbors_are_objects:
                    neighbors = neighbors[neighbors != object_number]
                nc = len(neighbors)
                neighbor_count[index] = nc
                if nc > 0:
                    first_objects.append(np.ones(nc, int) * object_number)
                    second_objects.append(neighbors)
                if self.neighbors_are_objects:
                    #
                    # Find the # of overlapping pixels. Dilate the neighbors
                    # and see how many pixels overlap our image. Use a 3x3
                    # structuring element to expand the overlapping edge
                    # into the perimeter.
                    #
                    outline_patch = perimeter_outlines[
                        min_i[index]:max_i[index],
                        min_j[index]:max_j[index]] == object_number
                    extended = scind.binary_dilation(
                        (patch != 0) & (patch != object_number),
                        strel_touching)
                    overlap = np.sum(outline_patch & extended)
                    pixel_count[index] = overlap
            if sum([len(x) for x in first_objects]) > 0:
                first_objects = np.hstack(first_objects)
                reverse_object_numbers = np.zeros(
                    max(np.max(object_numbers), np.max(first_objects)) + 1,
                    int)
                reverse_object_numbers[object_numbers] = np.arange(
                    len(object_numbers)) + 1
                first_objects = reverse_object_numbers[first_objects]

                second_objects = np.hstack(second_objects)
                reverse_neighbor_numbers = np.zeros(
                    max(np.max(neighbor_numbers), np.max(second_objects)) + 1,
                    int)
                reverse_neighbor_numbers[neighbor_numbers] = np.arange(
                    len(neighbor_numbers)) + 1
                second_objects = reverse_neighbor_numbers[second_objects]
                to_keep = (first_objects > 0) & (second_objects > 0)
                first_objects = first_objects[to_keep]
                second_objects = second_objects[to_keep]
            else:
                first_objects = np.zeros(0, int)
                second_objects = np.zeros(0, int)
            if self.neighbors_are_objects:
                percent_touching = pixel_count * 100 / perimeters
            else:
                percent_touching = pixel_count * 100.0 / areas
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            #
            # Have to recompute nearest
            #
            first_object_number = np.zeros(nkept_objects, int)
            second_object_number = np.zeros(nkept_objects, int)
            if nkept_objects > (1 if self.neighbors_are_objects else 0):
                di = (ocenters[object_indexes[:, np.newaxis], 0] -
                      ncenters[neighbor_indexes[np.newaxis, :], 0])
                dj = (ocenters[object_indexes[:, np.newaxis], 1] -
                      ncenters[neighbor_indexes[np.newaxis, :], 1])
                distance_matrix = np.sqrt(di * di + dj * dj)
                distance_matrix[~has_pixels, :] = np.inf
                distance_matrix[:, ~neighbor_has_pixels] = np.inf
                #
                # order[:,0] should be arange(nobjects)
                # order[:,1] should be the nearest neighbor
                # order[:,2] should be the next nearest neighbor
                #
                order = np.lexsort([distance_matrix
                                    ]).astype(first_object_number.dtype)
                if self.neighbors_are_objects:
                    first_object_number[has_pixels] = order[has_pixels, 1] + 1
                    if nkept_objects > 2:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 2] + 1
                else:
                    first_object_number[has_pixels] = order[has_pixels, 0] + 1
                    if order.shape[1] > 1:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 1] + 1
        else:
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            first_objects = np.zeros(0, int)
            second_objects = np.zeros(0, int)
        #
        # Now convert all measurements from the small-removed to
        # the final number set.
        #
        neighbor_count = neighbor_count[object_indexes]
        neighbor_count[~has_pixels] = 0
        percent_touching = percent_touching[object_indexes]
        percent_touching[~has_pixels] = 0
        first_x_vector = first_x_vector[object_indexes]
        second_x_vector = second_x_vector[object_indexes]
        first_y_vector = first_y_vector[object_indexes]
        second_y_vector = second_y_vector[object_indexes]
        angle = angle[object_indexes]
        #
        # Record the measurements
        #
        assert (isinstance(workspace, cpw.Workspace))
        m = workspace.measurements
        assert (isinstance(m, cpmeas.Measurements))
        image_set = workspace.image_set
        features_and_data = [
            (M_NUMBER_OF_NEIGHBORS, neighbor_count),
            (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
            (M_FIRST_CLOSEST_DISTANCE,
             np.sqrt(first_x_vector**2 + first_y_vector**2)),
            (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
            (M_SECOND_CLOSEST_DISTANCE,
             np.sqrt(second_x_vector**2 + second_y_vector**2)),
            (M_ANGLE_BETWEEN_NEIGHBORS, angle)
        ]
        if self.neighbors_are_objects:
            features_and_data.append((M_PERCENT_TOUCHING, percent_touching))
        for feature_name, data in features_and_data:
            m.add_measurement(self.object_name.value,
                              self.get_measurement_name(feature_name), data)
        if len(first_objects) > 0:
            m.add_relate_measurement(
                self.module_num, cpmeas.NEIGHBORS, self.object_name.value,
                self.object_name.value
                if self.neighbors_are_objects else self.neighbors_name.value,
                m.image_set_number * np.ones(first_objects.shape, int),
                first_objects,
                m.image_set_number * np.ones(second_objects.shape, int),
                second_objects)

        labels = kept_labels

        neighbor_count_image = np.zeros(labels.shape, int)
        object_mask = objects.segmented != 0
        object_indexes = objects.segmented[object_mask] - 1
        neighbor_count_image[object_mask] = neighbor_count[object_indexes]
        workspace.display_data.neighbor_count_image = neighbor_count_image

        if self.neighbors_are_objects:
            percent_touching_image = np.zeros(labels.shape)
            percent_touching_image[object_mask] = percent_touching[
                object_indexes]
            workspace.display_data.percent_touching_image = percent_touching_image

        image_set = workspace.image_set
        if self.wants_count_image.value:
            neighbor_cm_name = self.count_colormap.value
            neighbor_cm = get_colormap(neighbor_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=neighbor_cm)
            img = sm.to_rgba(neighbor_count_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            count_image = cpi.Image(img, masking_objects=objects)
            image_set.add(self.count_image_name.value, count_image)
        else:
            neighbor_cm_name = cpprefs.get_default_colormap()
            neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
        if self.neighbors_are_objects and self.wants_percent_touching_image:
            percent_touching_cm_name = self.touching_colormap.value
            percent_touching_cm = get_colormap(percent_touching_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=percent_touching_cm)
            img = sm.to_rgba(percent_touching_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            touching_image = cpi.Image(img, masking_objects=objects)
            image_set.add(self.touching_image_name.value, touching_image)
        else:
            percent_touching_cm_name = cpprefs.get_default_colormap()
            percent_touching_cm = matplotlib.cm.get_cmap(
                percent_touching_cm_name)

        if self.show_window:
            workspace.display_data.neighbor_cm_name = neighbor_cm_name
            workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.expanded_labels = expanded_labels
            workspace.display_data.object_mask = object_mask
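The D_EXPAND branch above (and filter_using_image in the next example) use scipy.ndimage.distance_transform_edt with return_indices=True to give every background pixel the label of its nearest object. In isolation:

import numpy as np
from scipy import ndimage as scind

labels = np.array([[1, 0, 0, 2],
                   [0, 0, 0, 0]])
# i, j hold, for every pixel, the coordinates of the nearest labelled pixel
i, j = scind.distance_transform_edt(labels == 0,
                                    return_distances=False,
                                    return_indices=True)
expanded = labels[i, j]
print(expanded)
# [[1 1 2 2]
#  [1 1 2 2]]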
Example #15
    def filter_using_image(self, workspace, mask):
        '''Filter out connections using local intensity minima between objects

        workspace - the workspace for the image set
        mask - mask of background points within the minimum distance
        '''
        #
        # NOTE: This is an efficient implementation and an improvement
        #       in accuracy over the Matlab version. It would be faster and
        #       more accurate to eliminate the line-connecting and instead
        #       do the following:
        #     * Distance transform to get the coordinates of the closest
        #       point in an object for points in the background that are
        #       at most 1/2 of the max distance between objects.
        #     * Take the intensity at this closest point and similarly
        #       label the background point if the background intensity
        #       is at least the minimum intensity fraction
        #     * Assume there is a connection between objects if, after this
        #       labeling, there are adjacent points in each object.
        #
        # As it is, the algorithm duplicates the Matlab version but suffers
        # for cells whose intensity isn't high in the centroid and clearly
        # suffers when two cells touch at some point that's off of the line
        # between the two.
        #
        objects = workspace.object_set.get_objects(self.objects_name.value)
        labels = objects.segmented
        image = self.get_image(workspace)
        if self.show_window:
            # Save the image for display
            workspace.display_data.image = image
        #
        # Do a distance transform into the background to label points
        # in the background with their closest foreground object
        #
        i, j = scind.distance_transform_edt(labels==0,
                                            return_indices=True,
                                            return_distances=False)
        confluent_labels = labels[i,j]
        confluent_labels[~mask] = 0
        if self.where_algorithm == CA_CLOSEST_POINT:
            #
            # For the closest point method, find the intensity at
            # the closest point in the object (which will be the point itself
            # for points in the object).
            #
            object_intensity = image[i,j] * self.minimum_intensity_fraction.value
            confluent_labels[object_intensity > image] = 0
        count, index, c_j = morph.find_neighbors(confluent_labels)
        if len(c_j) == 0:
            # Nobody touches - return the labels matrix
            return labels
        #
        # Make a row of i matching the touching j
        #
        c_i = np.zeros(len(c_j))
        #
        # Eliminate labels without matches
        #
        label_numbers = np.arange(1,len(count)+1)[count > 0]
        index = index[count > 0]
        count = count[count > 0]
        #
        # Get the differences between labels so we can use a cumsum trick
        # to increment to the next label when they change
        #
        label_numbers[1:] = label_numbers[1:] - label_numbers[:-1]
        c_i[index] = label_numbers
        c_i = np.cumsum(c_i).astype(int)
        if self.where_algorithm == CA_CENTROIDS:
            #
            # Only connect points > minimum intensity fraction
            #
            center_i, center_j = morph.centers_of_labels(labels)
            indexes, counts, i, j = morph.get_line_pts(
                center_i[c_i-1], center_j[c_i-1],
                center_i[c_j-1], center_j[c_j-1])
            #
            # The indexes of the centroids at pt1
            #
            last_indexes = indexes+counts-1
            #
            # The minimum of the intensities at pt0 and pt1
            #
            centroid_intensities = np.minimum(
                image[i[indexes],j[indexes]],
                image[i[last_indexes], j[last_indexes]])
            #
            # Assign label numbers to each point so we can use
            # scipy.ndimage.minimum. The label numbers are indexes into
            # "connections" above.
            #
            pt_labels = np.zeros(len(i), int)
            pt_labels[indexes[1:]] = 1
            pt_labels = np.cumsum(pt_labels)
            minima = scind.minimum(image[i,j], pt_labels, np.arange(len(indexes)))
            minima = morph.fixup_scipy_ndimage_result(minima)
            #
            # Filter the connections using the image
            #
            mif = self.minimum_intensity_fraction.value
            i = c_i[centroid_intensities * mif <= minima]
            j = c_j[centroid_intensities * mif <= minima]
        else:
            i = c_i
            j = c_j
        #
        # Add in connections from self to self
        #
        unique_labels = np.unique(labels)
        i = np.hstack((i, unique_labels))
        j = np.hstack((j, unique_labels))
        #
        # Run "all_connected_components" to get a component # for
        # objects identified as same.
        #
        new_indexes = morph.all_connected_components(i, j)
        new_labels = np.zeros(labels.shape, int)
        new_labels[labels != 0] = new_indexes[labels[labels != 0]]
        return new_labels
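
A minimal, standalone sketch of the distance-transform trick the method above relies on: give every background pixel the label of its nearest object. The toy label matrix is illustrative only; scipy.ndimage plays the role of the module's scind alias.

import numpy as np
import scipy.ndimage

labels = np.zeros((6, 6), int)
labels[1, 1] = 1  # object 1
labels[4, 4] = 2  # object 2

# For each background pixel (labels == 0), get the coordinates of the
# nearest foreground pixel; foreground pixels map to themselves.
i, j = scipy.ndimage.distance_transform_edt(
    labels == 0, return_distances=False, return_indices=True)

# Indexing the label matrix with those coordinates propagates every
# object's label out into the background ("confluent" labels).
confluent_labels = labels[i, j]
print(confluent_labels)
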
Example No. 16
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)
        assert isinstance(objects, cpo.Objects)
        has_pixels = objects.areas > 0
        labels = objects.small_removed_segmented
        kept_labels = objects.segmented
        neighbor_objects = workspace.object_set.get_objects(
            self.neighbors_name.value)
        assert isinstance(neighbor_objects, cpo.Objects)
        neighbor_labels = neighbor_objects.small_removed_segmented
        #
        # Need to add in labels touching border.
        #
        unedited_segmented = neighbor_objects.unedited_segmented
        touching_border = np.zeros(np.max(unedited_segmented) + 1, bool)
        touching_border[unedited_segmented[0, :]] = True
        touching_border[unedited_segmented[-1, :]] = True
        touching_border[unedited_segmented[:, 0]] = True
        touching_border[unedited_segmented[:, -1]] = True
        touching_border[0] = False
        touching_border_mask = touching_border[unedited_segmented]
        nobjects = np.max(labels)
        nkept_objects = objects.count
        nneighbors = np.max(neighbor_labels)
        if np.any(touching_border) and \
           np.all(~ touching_border_mask[neighbor_labels!=0]):
            # Add the border labels if any were excluded
            touching_border_object_number = np.cumsum(touching_border) + \
                np.max(neighbor_labels)
            touching_border_mask = touching_border_mask & (neighbor_labels == 0)
            neighbor_labels = neighbor_labels.copy().astype(np.int32)
            neighbor_labels[touching_border_mask] = touching_border_object_number[
                unedited_segmented[touching_border_mask]]
        
        # A neighbor object "has pixels" if its label occurs at least once;
        # bincount tallies each label and [1:] drops the background count.
        neighbor_has_pixels = np.bincount(neighbor_labels.ravel())[1:] > 0
        
        _, object_numbers = objects.relate_labels(labels, kept_labels)
        if self.neighbors_are_objects:
            neighbor_numbers = object_numbers
        else:
            _, neighbor_numbers = neighbor_objects.relate_labels(
                neighbor_labels, neighbor_objects.segmented)
        neighbor_count = np.zeros((nobjects,))
        pixel_count = np.zeros((nobjects,))
        first_object_number = np.zeros((nobjects,),int)
        second_object_number = np.zeros((nobjects,),int)
        first_x_vector = np.zeros((nobjects,))
        second_x_vector = np.zeros((nobjects,))
        first_y_vector = np.zeros((nobjects,))
        second_y_vector = np.zeros((nobjects,))
        angle = np.zeros((nobjects,))
        percent_touching = np.zeros((nobjects,))
        expanded_labels = None
        if self.distance_method == D_EXPAND:
            # Find the i,j coordinates of the nearest foreground point
            # to every background point
            i,j = scind.distance_transform_edt(labels==0,
                                               return_distances=False,
                                               return_indices=True)
            # Assign each background pixel to the label of its nearest
            # foreground pixel. Assign label to label for foreground.
            labels = labels[i,j]
            expanded_labels = labels  # for display
            distance = 1 # dilate once to make touching edges overlap
            scale = S_EXPANDED
            if self.neighbors_are_objects:
                neighbor_labels = labels.copy()
        elif self.distance_method == D_WITHIN:
            distance = self.distance.value
            scale = str(distance)
        elif self.distance_method == D_ADJACENT:
            distance = 1
            scale = S_ADJACENT
        else:
            raise ValueError("Unknown distance method: %s" %
                             self.distance_method.value)
        if nneighbors > (1 if self.neighbors_are_objects else 0):
            first_objects = []
            second_objects = []
            object_indexes = np.arange(nobjects, dtype=np.int32)+1
            #
            # First, compute the first and second nearest neighbors,
            # and the angles between self and the first and second
            # nearest neighbors
            #
            ocenters = centers_of_labels(
                objects.small_removed_segmented).transpose()
            ncenters = centers_of_labels(
                neighbor_objects.small_removed_segmented).transpose()
            areas = fix(scind.sum(np.ones(labels.shape),labels, object_indexes))
            perimeter_outlines = outline(labels)
            perimeters = fix(scind.sum(
                np.ones(labels.shape), perimeter_outlines, object_indexes))
                                       
            i,j = np.mgrid[0:nobjects,0:nneighbors]
            distance_matrix = np.sqrt((ocenters[i,0] - ncenters[j,0])**2 +
                                      (ocenters[i,1] - ncenters[j,1])**2)
            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            if distance_matrix.shape[1] == 1:
                # a little buggy, lexsort assumes that a 2-d array of
                # second dimension = 1 is a 1-d array
                order = np.zeros(distance_matrix.shape, int)
            else:
                order = np.lexsort([distance_matrix])
            first_neighbor = 1 if self.neighbors_are_objects else 0
            first_object_index = order[:, first_neighbor]
            first_x_vector = ncenters[first_object_index,1] - ocenters[:,1]
            first_y_vector = ncenters[first_object_index,0] - ocenters[:,0]
            if nneighbors > first_neighbor+1:
                second_object_index = order[:, first_neighbor + 1]
                second_x_vector = ncenters[second_object_index,1] - ocenters[:,1]
                second_y_vector = ncenters[second_object_index,0] - ocenters[:,0]
                v1 = np.array((first_x_vector,first_y_vector))
                v2 = np.array((second_x_vector,second_y_vector))
                #
                # Compute the cosine of the angle between v1 and v2
                # (dot product of the normalized vectors)
                #
                dot = (np.sum(v1*v2,0) / 
                       np.sqrt(np.sum(v1**2,0)*np.sum(v2**2,0)))
                angle = np.arccos(dot) * 180. / np.pi
            
            # Make the structuring element for dilation
            strel = strel_disk(distance)
            #
            # A little bigger one to enter into the border with a structure
            # that mimics the one used to create the outline
            #
            strel_touching = strel_disk(distance + .5)
            #
            # Get the extents for each object and calculate the patch
            # that excises the part of the image that is "distance"
            # away
            i,j = np.mgrid[0:labels.shape[0],0:labels.shape[1]]
            min_i, max_i, min_i_pos, max_i_pos =\
                scind.extrema(i,labels,object_indexes)
            min_j, max_j, min_j_pos, max_j_pos =\
                scind.extrema(j,labels,object_indexes)
            min_i = np.maximum(fix(min_i)-distance,0).astype(int)
            max_i = np.minimum(fix(max_i)+distance+1,labels.shape[0]).astype(int)
            min_j = np.maximum(fix(min_j)-distance,0).astype(int)
            max_j = np.minimum(fix(max_j)+distance+1,labels.shape[1]).astype(int)
            #
            # Loop over all objects
            # Calculate which ones overlap "index"
            # Calculate how much overlap there is of others to "index"
            #
            for object_number in object_numbers:
                if object_number == 0:
                    #
                    # No corresponding object in small-removed. This means
                    # that the object has no pixels, e.g. not renumbered.
                    #
                    continue
                index = object_number - 1
                patch = labels[min_i[index]:max_i[index],
                               min_j[index]:max_j[index]]
                npatch = neighbor_labels[min_i[index]:max_i[index],
                                         min_j[index]:max_j[index]]
                #
                # Find the neighbors
                #
                patch_mask = patch==(index+1)
                extended = scind.binary_dilation(patch_mask,strel)
                neighbors = np.unique(npatch[extended])
                neighbors = neighbors[neighbors != 0]
                if self.neighbors_are_objects:
                    neighbors = neighbors[neighbors != object_number]
                nc = len(neighbors)
                neighbor_count[index] = nc
                if nc > 0:
                    first_objects.append(np.ones(nc,int) * object_number)
                    second_objects.append(neighbors)
                if self.neighbors_are_objects:
                    #
                    # Find the # of overlapping pixels. Dilate the neighbors
                    # and see how many pixels overlap our image. Use a 3x3
                    # structuring element to expand the overlapping edge
                    # into the perimeter.
                    #
                    outline_patch = perimeter_outlines[
                        min_i[index]:max_i[index],
                        min_j[index]:max_j[index]] == object_number
                    extended = scind.binary_dilation(
                        (patch != 0) & (patch != object_number), strel_touching)
                    overlap = np.sum(outline_patch & extended)
                    pixel_count[index] = overlap
            if sum([len(x) for x in first_objects]) > 0:
                first_objects = np.hstack(first_objects)
                reverse_object_numbers = np.zeros(
                    max(np.max(object_numbers), np.max(first_objects)) + 1, int)
                reverse_object_numbers[object_numbers] = np.arange(len(object_numbers)) + 1
                first_objects = reverse_object_numbers[first_objects]
    
                second_objects = np.hstack(second_objects)
                reverse_neighbor_numbers = np.zeros(
                    max(np.max(neighbor_numbers), np.max(second_objects)) + 1, int)
                reverse_neighbor_numbers[neighbor_numbers] = np.arange(len(neighbor_numbers)) + 1
                second_objects = reverse_neighbor_numbers[second_objects]
                to_keep = (first_objects > 0) & (second_objects > 0)
                first_objects = first_objects[to_keep]
                second_objects = second_objects[to_keep]
            else:
                first_objects = np.zeros(0, int)
                second_objects = np.zeros(0, int)
            if self.neighbors_are_objects:
                percent_touching = pixel_count * 100 / perimeters
            else:
                percent_touching = pixel_count * 100.0 / areas
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            #
            # Have to recompute nearest
            #
            first_object_number = np.zeros(nkept_objects, int)
            second_object_number = np.zeros(nkept_objects, int)
            if nkept_objects > (1 if self.neighbors_are_objects else 0):
                di = (ocenters[object_indexes[:, np.newaxis], 0] - 
                      ncenters[neighbor_indexes[np.newaxis, :], 0])
                dj = (ocenters[object_indexes[:, np.newaxis], 1] - 
                      ncenters[neighbor_indexes[np.newaxis, :], 1])
                distance_matrix = np.sqrt(di*di + dj*dj)
                distance_matrix[~ has_pixels, :] = np.inf
                distance_matrix[:, ~neighbor_has_pixels] = np.inf
                #
                # order[:,0] should be arange(nobjects)
                # order[:,1] should be the nearest neighbor
                # order[:,2] should be the next nearest neighbor
                #
                order = np.lexsort([distance_matrix]).astype(
                    first_object_number.dtype)
                if self.neighbors_are_objects:
                    first_object_number[has_pixels] = order[has_pixels,1] + 1
                    if nkept_objects > 2:
                        second_object_number[has_pixels] = order[has_pixels,2] + 1
                else:
                    first_object_number[has_pixels] = order[has_pixels,0] + 1
                    if order.shape[1] > 1:
                        second_object_number[has_pixels] = order[has_pixels,1] + 1
        else:
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            first_objects = np.zeros(0, int)
            second_objects = np.zeros(0, int)
        #
        # Now convert all measurements from the small-removed to
        # the final number set.
        #
        neighbor_count = neighbor_count[object_indexes]
        neighbor_count[~ has_pixels] = 0
        percent_touching = percent_touching[object_indexes]
        percent_touching[~ has_pixels] = 0
        first_x_vector = first_x_vector[object_indexes]
        second_x_vector = second_x_vector[object_indexes]
        first_y_vector = first_y_vector[object_indexes]
        second_y_vector = second_y_vector[object_indexes]
        angle = angle[object_indexes]
        #
        # Record the measurements
        #
        assert isinstance(workspace, cpw.Workspace)
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        image_set = workspace.image_set
        features_and_data = [
            (M_NUMBER_OF_NEIGHBORS, neighbor_count),
            (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
            (M_FIRST_CLOSEST_DISTANCE, np.sqrt(first_x_vector**2+first_y_vector**2)),
            (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
            (M_SECOND_CLOSEST_DISTANCE, np.sqrt(second_x_vector**2+second_y_vector**2)),
            (M_ANGLE_BETWEEN_NEIGHBORS, angle)]
        if self.neighbors_are_objects:
            features_and_data.append((M_PERCENT_TOUCHING, percent_touching))
        for feature_name, data in features_and_data:
            m.add_measurement(self.object_name.value,
                              self.get_measurement_name(feature_name),
                              data)
        if len(first_objects) > 0:
            m.add_relate_measurement(
                self.module_num, 
                cpmeas.NEIGHBORS,
                self.object_name.value,
                self.object_name.value if self.neighbors_are_objects 
                else self.neighbors_name.value,
                m.image_set_number * np.ones(first_objects.shape, int),
                first_objects,
                m.image_set_number * np.ones(second_objects.shape, int),
                second_objects)
                                 
        labels = kept_labels
        
        neighbor_count_image = np.zeros(labels.shape,int)
        object_mask = objects.segmented != 0
        object_indexes = objects.segmented[object_mask]-1
        neighbor_count_image[object_mask] = neighbor_count[object_indexes]
        workspace.display_data.neighbor_count_image = neighbor_count_image
        
        if self.neighbors_are_objects:
            percent_touching_image = np.zeros(labels.shape)
            percent_touching_image[object_mask] = percent_touching[object_indexes]
            workspace.display_data.percent_touching_image = percent_touching_image
        
        image_set = workspace.image_set
        if self.wants_count_image.value:
            neighbor_cm_name = self.count_colormap.value
            neighbor_cm = get_colormap(neighbor_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap = neighbor_cm)
            img = sm.to_rgba(neighbor_count_image)[:,:,:3]
            img[:,:,0][~ object_mask] = 0
            img[:,:,1][~ object_mask] = 0
            img[:,:,2][~ object_mask] = 0
            count_image = cpi.Image(img, masking_objects = objects)
            image_set.add(self.count_image_name.value, count_image)
        else:
            neighbor_cm_name = cpprefs.get_default_colormap()
            neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
        if self.neighbors_are_objects and self.wants_percent_touching_image:
            percent_touching_cm_name = self.touching_colormap.value
            percent_touching_cm = get_colormap(percent_touching_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap = percent_touching_cm)
            img = sm.to_rgba(percent_touching_image)[:,:,:3]
            img[:,:,0][~ object_mask] = 0
            img[:,:,1][~ object_mask] = 0
            img[:,:,2][~ object_mask] = 0
            touching_image = cpi.Image(img, masking_objects = objects)
            image_set.add(self.touching_image_name.value,
                          touching_image)
        else:
            percent_touching_cm_name = cpprefs.get_default_colormap()
            percent_touching_cm = matplotlib.cm.get_cmap(percent_touching_cm_name)

        if self.show_window:
            workspace.display_data.neighbor_cm_name = neighbor_cm_name
            workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.expanded_labels = expanded_labels
            workspace.display_data.object_mask = object_mask
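
A small, self-contained sketch (not part of the module above) of how a per-row lexsort of the centroid distance matrix yields each object's nearest and second-nearest neighbor, the same trick run() uses for both the angle computation and the final object numbers. The three centroids below are made up for illustration.

import numpy as np

ocenters = np.array([[0.0, 0.0], [0.0, 3.0], [4.0, 0.0]])  # object centroids (i, j)
ncenters = ocenters  # here the neighbors are the objects themselves

di = ocenters[:, np.newaxis, 0] - ncenters[np.newaxis, :, 0]
dj = ocenters[:, np.newaxis, 1] - ncenters[np.newaxis, :, 1]
distance_matrix = np.sqrt(di * di + dj * dj)

# lexsort with a single 2-d key argsorts each row: column 0 is the object
# itself (distance zero), column 1 its nearest neighbor, column 2 the next.
order = np.lexsort([distance_matrix])
first_object_number = order[:, 1] + 1
second_object_number = order[:, 2] + 1
print(first_object_number, second_object_number)  # [2 1 1] [3 3 2]
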
Example No. 17
    def filter_using_image(self, workspace, mask):
        '''Filter out connections using local intensity minima between objects

        workspace - the workspace for the image set
        mask - mask of background points within the minimum distance
        '''
        #
        # NOTE: This is an efficient implementation and an improvement
        #       in accuracy over the Matlab version. It would be faster and
        #       more accurate to eliminate the line-connecting and instead
        #       do the following:
        #     * Distance transform to get the coordinates of the closest
        #       point in an object for points in the background that are
        #       at most 1/2 of the max distance between objects.
        #     * Take the intensity at this closest point and similarly
        #       label the background point if the background intensity
        #       is at least the minimum intensity fraction
        #     * Assume there is a connection between objects if, after this
        #       labeling, there are adjacent points in each object.
        #
        # As it is, the algorithm duplicates the Matlab version but suffers
        # for cells whose intensity isn't high in the centroid and clearly
        # suffers when two cells touch at some point that's off of the line
        # between the two.
        #
        objects = workspace.object_set.get_objects(self.objects_name.value)
        labels = objects.segmented
        image = self.get_image(workspace)
        if self.show_window:
            # Save the image for display
            workspace.display_data.image = image
        #
        # Do a distance transform into the background to label points
        # in the background with their closest foreground object
        #
        i, j = scind.distance_transform_edt(labels == 0,
                                            return_indices=True,
                                            return_distances=False)
        confluent_labels = labels[i, j]
        confluent_labels[~mask] = 0
        if self.where_algorithm == CA_CLOSEST_POINT:
            #
            # For the closest point method, find the intensity at
            # the closest point in the object (which will be the point itself
            # for points in the object).
            #
            object_intensity = image[i,
                                     j] * self.minimum_intensity_fraction.value
            confluent_labels[object_intensity > image] = 0
        count, index, c_j = morph.find_neighbors(confluent_labels)
        if len(c_j) == 0:
            # Nobody touches - return the labels matrix
            return labels
        #
        # Make a row of i matching the touching j
        #
        c_i = np.zeros(len(c_j))
        #
        # Eliminate labels without matches
        #
        label_numbers = np.arange(1, len(count) + 1)[count > 0]
        index = index[count > 0]
        count = count[count > 0]
        #
        # Get the differences between labels so we can use a cumsum trick
        # to increment to the next label when they change
        #
        label_numbers[1:] = label_numbers[1:] - label_numbers[:-1]
        c_i[index] = label_numbers
        c_i = np.cumsum(c_i).astype(int)
        if self.where_algorithm == CA_CENTROIDS:
            #
            # Only connect points > minimum intensity fraction
            #
            center_i, center_j = morph.centers_of_labels(labels)
            indexes, counts, i, j = morph.get_line_pts(center_i[c_i - 1],
                                                       center_j[c_i - 1],
                                                       center_i[c_j - 1],
                                                       center_j[c_j - 1])
            #
            # The indexes of the centroids at pt1
            #
            last_indexes = indexes + counts - 1
            #
            # The minimum of the intensities at pt0 and pt1
            #
            centroid_intensities = np.minimum(
                image[i[indexes], j[indexes]], image[i[last_indexes],
                                                     j[last_indexes]])
            #
            # Assign label numbers to each point so we can use
            # scipy.ndimage.minimum. The label numbers are indexes into
            # "connections" above.
            #
            pt_labels = np.zeros(len(i), int)
            pt_labels[indexes[1:]] = 1
            pt_labels = np.cumsum(pt_labels)
            minima = scind.minimum(image[i, j], pt_labels,
                                   np.arange(len(indexes)))
            minima = morph.fixup_scipy_ndimage_result(minima)
            #
            # Filter the connections using the image
            #
            mif = self.minimum_intensity_fraction.value
            i = c_i[centroid_intensities * mif <= minima]
            j = c_j[centroid_intensities * mif <= minima]
        else:
            i = c_i
            j = c_j
        #
        # Add in connections from self to self
        #
        unique_labels = np.unique(labels)
        i = np.hstack((i, unique_labels))
        j = np.hstack((j, unique_labels))
        #
        # Run "all_connected_components" to get a component # for
        # objects identified as same.
        #
        new_indexes = morph.all_connected_components(i, j)
        new_labels = np.zeros(labels.shape, int)
        new_labels[labels != 0] = new_indexes[labels[labels != 0]]
        return new_labels
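
A hedged, standalone sketch of the cumulative-sum labeling trick the centroid branch above uses: mark the first point of every line segment with a 1, take a cumulative sum so each point carries the index of its segment, then let scipy.ndimage.minimum report the per-segment intensity minimum. All values below are illustrative only.

import numpy as np
import scipy.ndimage

values = np.array([5.0, 3.0, 4.0, 9.0, 1.0, 2.0, 7.0])  # intensity along all line points
indexes = np.array([0, 3, 5])  # start offset of each line segment

pt_labels = np.zeros(len(values), int)
pt_labels[indexes[1:]] = 1        # 1 at the start of every segment but the first
pt_labels = np.cumsum(pt_labels)  # [0 0 0 1 1 2 2] -> segment index per point

minima = scipy.ndimage.minimum(values, pt_labels, np.arange(len(indexes)))
print(minima)  # [3.0, 1.0, 2.0]
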
    def do_measurements(self, workspace, image_name, object_name,
                        center_object_name, center_choice,
                        bin_count_settings, dd):
        '''Perform the radial measurements on the image set

        workspace - workspace that holds images / objects
        image_name - make measurements on this image
        object_name - make measurements on these objects
        center_object_name - use the centers of these related objects as
                      the centers for radial measurements. None to use the
                      objects themselves.
        center_choice - the user's center choice for this object:
                      C_SELF, C_CENTERS_OF_OTHER or C_EDGES_OF_OTHER.
        bin_count_settings - the bin count settings group
        dd - a dictionary for saving reusable partial results

        returns one statistics tuple per ring.
        '''
        assert isinstance(workspace, cpw.Workspace)
        assert isinstance(workspace.object_set, cpo.ObjectSet)
        bin_count = bin_count_settings.bin_count.value
        wants_scaled = bin_count_settings.wants_scaled.value
        maximum_radius = bin_count_settings.maximum_radius.value

        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        labels, pixel_data = cpo.crop_labels_and_image(objects.segmented,
                                                       image.pixel_data)
        nobjects = np.max(objects.segmented)
        measurements = workspace.measurements
        assert isinstance(measurements, cpmeas.Measurements)
        heatmaps = {}
        for heatmap in self.heatmaps:
            if heatmap.object_name.get_objects_name() == object_name and \
                            image_name == heatmap.image_name.get_image_name() and \
                            heatmap.get_number_of_bins() == bin_count:
                dd[id(heatmap)] = \
                    heatmaps[MEASUREMENT_ALIASES[heatmap.measurement.value]] = \
                    np.zeros(labels.shape)
        if nobjects == 0:
            for bin in range(1, bin_count + 1):
                for feature in (F_FRAC_AT_D, F_MEAN_FRAC, F_RADIAL_CV):
                    feature_name = (
                        (feature + FF_GENERIC) % (image_name, bin, bin_count))
                    measurements.add_measurement(
                            object_name, "_".join([M_CATEGORY, feature_name]),
                            np.zeros(0))
                    if not wants_scaled:
                        measurement_name = "_".join([M_CATEGORY, feature,
                                                     image_name, FF_OVERFLOW])
                        measurements.add_measurement(
                                object_name, measurement_name, np.zeros(0))
            return [(image_name, object_name, "no objects", "-", "-", "-", "-")]
        name = (object_name if center_object_name is None
                else "%s_%s" % (object_name, center_object_name))
        if name in dd:
            normalized_distance, i_center, j_center, good_mask = dd[name]
        else:
            d_to_edge = distance_to_edge(labels)
            if center_object_name is not None:
                #
                # Use the center of the centering objects to assign a center
                # to each labeled pixel using propagation
                #
                center_objects = workspace.object_set.get_objects(center_object_name)
                center_labels, cmask = cpo.size_similarly(
                        labels, center_objects.segmented)
                pixel_counts = fix(scind.sum(
                        np.ones(center_labels.shape),
                        center_labels,
                        np.arange(1, np.max(center_labels) + 1, dtype=np.int32)))
                good = pixel_counts > 0
                i, j = (centers_of_labels(center_labels) + .5).astype(int)
                ig = i[good]
                jg = j[good]
                lg = np.arange(1, len(i) + 1)[good]
                if center_choice == C_CENTERS_OF_OTHER:
                    #
                    # Reduce the propagation labels to the centers of
                    # the centering objects
                    #
                    center_labels = np.zeros(center_labels.shape, int)
                    center_labels[ig, jg] = lg
                cl, d_from_center = propagate(np.zeros(center_labels.shape),
                                              center_labels,
                                              labels != 0, 1)
                #
                # Erase the centers that fall outside of labels
                #
                cl[labels == 0] = 0
                #
                # If objects are hollow or crescent-shaped, there may be
                # objects without center labels. As a backup, find the
                # center that is the closest to the center of mass.
                #
                missing_mask = (labels != 0) & (cl == 0)
                missing_labels = np.unique(labels[missing_mask])
                if len(missing_labels):
                    all_centers = centers_of_labels(labels)
                    missing_i_centers, missing_j_centers = \
                        all_centers[:, missing_labels - 1]
                    di = missing_i_centers[:, np.newaxis] - ig[np.newaxis, :]
                    dj = missing_j_centers[:, np.newaxis] - jg[np.newaxis, :]
                    missing_best = lg[np.argsort(di * di + dj * dj)[:, 0]]
                    best = np.zeros(np.max(labels) + 1, int)
                    best[missing_labels] = missing_best
                    cl[missing_mask] = best[labels[missing_mask]]
                    #
                    # Now compute the crow-flies distance to the centers
                    # of these pixels from whatever center was assigned to
                    # the object.
                    #
                    iii, jjj = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
                    di = iii[missing_mask] - i[cl[missing_mask] - 1]
                    dj = jjj[missing_mask] - j[cl[missing_mask] - 1]
                    d_from_center[missing_mask] = np.sqrt(di * di + dj * dj)
            else:
                # Find the point in each object farthest away from the edge.
                # This does better than the centroid:
                # * The center is within the object
                # * The center tends to be an interesting point, like the
                #   center of the nucleus or the center of one or the other
                #   of two touching cells.
                #
                i, j = maximum_position_of_labels(d_to_edge, labels, objects.indices)
                center_labels = np.zeros(labels.shape, int)
                center_labels[i, j] = labels[i, j]
                #
                # Use the coloring trick here to process touching objects
                # in separate operations
                #
                colors = color_labels(labels)
                ncolors = np.max(colors)
                d_from_center = np.zeros(labels.shape)
                cl = np.zeros(labels.shape, int)
                for color in range(1, ncolors + 1):
                    mask = colors == color
                    l, d = propagate(np.zeros(center_labels.shape),
                                     center_labels,
                                     mask, 1)
                    d_from_center[mask] = d[mask]
                    cl[mask] = l[mask]
            good_mask = cl > 0
            if center_choice == C_EDGES_OF_OTHER:
                # Exclude pixels within the centering objects
                # when performing calculations from the centers
                good_mask = good_mask & (center_labels == 0)
            i_center = np.zeros(cl.shape)
            i_center[good_mask] = i[cl[good_mask] - 1]
            j_center = np.zeros(cl.shape)
            j_center[good_mask] = j[cl[good_mask] - 1]

            normalized_distance = np.zeros(labels.shape)
            if wants_scaled:
                total_distance = d_from_center + d_to_edge
                normalized_distance[good_mask] = (d_from_center[good_mask] /
                                                  (total_distance[good_mask] + .001))
            else:
                normalized_distance[good_mask] = \
                    d_from_center[good_mask] / maximum_radius
            dd[name] = [normalized_distance, i_center, j_center, good_mask]
        ngood_pixels = np.sum(good_mask)
        good_labels = labels[good_mask]
        # Assign each pixel to a radial bin; anything past the last bin is
        # clamped into the overflow bin (index bin_count).
        bin_indexes = (normalized_distance * bin_count).astype(int)
        bin_indexes[bin_indexes > bin_count] = bin_count
        labels_and_bins = (good_labels - 1, bin_indexes[good_mask])
        histogram = coo_matrix((pixel_data[good_mask], labels_and_bins),
                               (nobjects, bin_count + 1)).toarray()
        sum_by_object = np.sum(histogram, 1)
        sum_by_object_per_bin = np.dstack([sum_by_object] * (bin_count + 1))[0]
        fraction_at_distance = histogram / sum_by_object_per_bin
        number_at_distance = coo_matrix((np.ones(ngood_pixels), labels_and_bins),
                                        (nobjects, bin_count + 1)).toarray()
        object_mask = number_at_distance > 0
        sum_by_object = np.sum(number_at_distance, 1)
        sum_by_object_per_bin = np.dstack([sum_by_object] * (bin_count + 1))[0]
        fraction_at_bin = number_at_distance / sum_by_object_per_bin
        mean_pixel_fraction = fraction_at_distance / (fraction_at_bin +
                                                      np.finfo(float).eps)
        masked_fraction_at_distance = masked_array(fraction_at_distance,
                                                   ~object_mask)
        masked_mean_pixel_fraction = masked_array(mean_pixel_fraction,
                                                  ~object_mask)
        # Anisotropy calculation.  Split each cell into eight wedges, then
        # compute coefficient of variation of the wedges' mean intensities
        # in each ring.
        #
        # Compute each pixel's delta from the center object's centroid
        i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
        imask = i[good_mask] > i_center[good_mask]
        jmask = j[good_mask] > j_center[good_mask]
        absmask = (abs(i[good_mask] - i_center[good_mask]) >
                   abs(j[good_mask] - j_center[good_mask]))
        radial_index = (imask.astype(int) + jmask.astype(int) * 2 +
                        absmask.astype(int) * 4)
        statistics = []

        for bin in range(bin_count + (0 if wants_scaled else 1)):
            bin_mask = (good_mask & (bin_indexes == bin))
            bin_pixels = np.sum(bin_mask)
            bin_labels = labels[bin_mask]
            bin_radial_index = radial_index[bin_indexes[good_mask] == bin]
            labels_and_radii = (bin_labels - 1, bin_radial_index)
            radial_values = coo_matrix((pixel_data[bin_mask],
                                        labels_and_radii),
                                       (nobjects, 8)).toarray()
            pixel_count = coo_matrix((np.ones(bin_pixels), labels_and_radii),
                                     (nobjects, 8)).toarray()
            mask = pixel_count == 0
            radial_means = masked_array(radial_values / pixel_count, mask)
            radial_cv = np.std(radial_means, 1) / np.mean(radial_means, 1)
            radial_cv[np.sum(~mask, 1) == 0] = 0
            for measurement, feature, overflow_feature in (
                    (fraction_at_distance[:, bin], MF_FRAC_AT_D, OF_FRAC_AT_D),
                    (mean_pixel_fraction[:, bin], MF_MEAN_FRAC, OF_MEAN_FRAC),
                    (np.array(radial_cv), MF_RADIAL_CV, OF_RADIAL_CV)):

                if bin == bin_count:
                    measurement_name = overflow_feature % image_name
                else:
                    measurement_name = feature % (image_name, bin + 1, bin_count)
                measurements.add_measurement(object_name,
                                             measurement_name,
                                             measurement)
                if feature in heatmaps:
                    heatmaps[feature][bin_mask] = measurement[bin_labels - 1]
            radial_cv.mask = np.sum(~mask, 1) == 0
            bin_name = str(bin + 1) if bin < bin_count else "Overflow"
            statistics += [(image_name, object_name, bin_name, str(bin_count),
                            round(np.mean(masked_fraction_at_distance[:, bin]), 4),
                            round(np.mean(masked_mean_pixel_fraction[:, bin]), 4),
                            round(np.mean(radial_cv), 4))]
        return statistics
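
A short, standalone sketch of the coo_matrix accumulation trick do_measurements() leans on: duplicate (object, bin) coordinates are summed when the sparse matrix is densified, which turns a flat list of pixels into a per-object, per-bin intensity table in one call. The toy labels, bins, and intensities are illustrative only.

import numpy as np
from scipy.sparse import coo_matrix

nobjects, bin_count = 2, 3
good_labels = np.array([1, 1, 1, 2, 2])           # object label of each pixel
bin_indexes = np.array([0, 0, 2, 1, 1])           # radial bin of each pixel
pixel_data = np.array([0.2, 0.3, 0.1, 0.4, 0.6])  # intensity of each pixel

# Rows are objects (0-based), columns are bins; repeated coordinates add up.
histogram = coo_matrix((pixel_data, (good_labels - 1, bin_indexes)),
                       (nobjects, bin_count)).toarray()
print(histogram)
# [[0.5 0.  0.1]
#  [0.  1.  0. ]]
fraction_at_distance = histogram / histogram.sum(axis=1, keepdims=True)
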
Example No. 19
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)
        dimensions = len(objects.shape)
        assert isinstance(objects, Objects)
        has_pixels = objects.areas > 0
        labels = objects.small_removed_segmented
        kept_labels = objects.segmented
        neighbor_objects = workspace.object_set.get_objects(
            self.neighbors_name.value)
        neighbor_labels = neighbor_objects.small_removed_segmented
        neighbor_kept_labels = neighbor_objects.segmented
        assert isinstance(neighbor_objects, Objects)
        if not self.wants_excluded_objects.value:
            # Remove labels not present in kept segmentation while preserving object IDs.
            mask = neighbor_kept_labels > 0
            neighbor_labels[~mask] = 0
        nobjects = numpy.max(labels)
        nkept_objects = len(objects.indices)
        nneighbors = numpy.max(neighbor_labels)

        _, object_numbers = objects.relate_labels(labels, kept_labels)
        if self.neighbors_are_objects:
            neighbor_numbers = object_numbers
            neighbor_has_pixels = has_pixels
        else:
            _, neighbor_numbers = neighbor_objects.relate_labels(
                neighbor_labels, neighbor_objects.small_removed_segmented)
            neighbor_has_pixels = numpy.bincount(
                neighbor_labels.ravel())[1:] > 0
        neighbor_count = numpy.zeros((nobjects, ))
        pixel_count = numpy.zeros((nobjects, ))
        first_object_number = numpy.zeros((nobjects, ), int)
        second_object_number = numpy.zeros((nobjects, ), int)
        first_x_vector = numpy.zeros((nobjects, ))
        second_x_vector = numpy.zeros((nobjects, ))
        first_y_vector = numpy.zeros((nobjects, ))
        second_y_vector = numpy.zeros((nobjects, ))
        angle = numpy.zeros((nobjects, ))
        percent_touching = numpy.zeros((nobjects, ))
        expanded_labels = None
        if self.distance_method == D_EXPAND:
            # Find the i,j coordinates of the nearest foreground point
            # to every background point
            if dimensions == 2:
                i, j = scipy.ndimage.distance_transform_edt(
                    labels == 0, return_distances=False, return_indices=True)
                # Assign each background pixel to the label of its nearest
                # foreground pixel. Assign label to label for foreground.
                labels = labels[i, j]
            else:
                k, i, j = scipy.ndimage.distance_transform_edt(
                    labels == 0, return_distances=False, return_indices=True)
                labels = labels[k, i, j]
            expanded_labels = labels  # for display
            distance = 1  # dilate once to make touching edges overlap
            scale = S_EXPANDED
            if self.neighbors_are_objects:
                neighbor_labels = labels.copy()
        elif self.distance_method == D_WITHIN:
            distance = self.distance.value
            scale = str(distance)
        elif self.distance_method == D_ADJACENT:
            distance = 1
            scale = S_ADJACENT
        else:
            raise ValueError("Unknown distance method: %s" %
                             self.distance_method.value)
        if nneighbors > (1 if self.neighbors_are_objects else 0):
            first_objects = []
            second_objects = []
            object_indexes = numpy.arange(nobjects, dtype=numpy.int32) + 1
            #
            # First, compute the first and second nearest neighbors,
            # and the angles between self and the first and second
            # nearest neighbors
            #
            ocenters = centers_of_labels(
                objects.small_removed_segmented).transpose()
            ncenters = centers_of_labels(
                neighbor_objects.small_removed_segmented).transpose()
            areas = fix(
                scipy.ndimage.sum(numpy.ones(labels.shape), labels,
                                  object_indexes))
            perimeter_outlines = outline(labels)
            perimeters = fix(
                scipy.ndimage.sum(numpy.ones(labels.shape), perimeter_outlines,
                                  object_indexes))

            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            order = numpy.zeros((nobjects, min(nneighbors, 3)),
                                dtype=numpy.uint32)
            j = numpy.arange(nneighbors)
            # (0, 1, 2) unless there are fewer than 3 neighbors
            partition_keys = tuple(range(min(nneighbors, 3)))
            for i in range(nobjects):
                dr = numpy.sqrt((ocenters[i, 0] - ncenters[j, 0])**2 +
                                (ocenters[i, 1] - ncenters[j, 1])**2)
                order[i, :] = numpy.argpartition(dr, partition_keys)[:3]

            first_neighbor = 1 if self.neighbors_are_objects else 0
            first_object_index = order[:, first_neighbor]
            first_x_vector = ncenters[first_object_index, 1] - ocenters[:, 1]
            first_y_vector = ncenters[first_object_index, 0] - ocenters[:, 0]
            if nneighbors > first_neighbor + 1:
                second_object_index = order[:, first_neighbor + 1]
                second_x_vector = ncenters[second_object_index,
                                           1] - ocenters[:, 1]
                second_y_vector = ncenters[second_object_index,
                                           0] - ocenters[:, 0]
                v1 = numpy.array((first_x_vector, first_y_vector))
                v2 = numpy.array((second_x_vector, second_y_vector))
                #
                # Compute the cosine of the angle between v1 and v2
                # (dot product of the normalized vectors)
                #
                dot = numpy.sum(v1 * v2, 0) / numpy.sqrt(
                    numpy.sum(v1**2, 0) * numpy.sum(v2**2, 0))
                angle = numpy.arccos(dot) * 180.0 / numpy.pi

            # Make the structuring element for dilation
            if dimensions == 2:
                strel = strel_disk(distance)
            else:
                strel = skimage.morphology.ball(distance)
            #
            # A little bigger one to enter into the border with a structure
            # that mimics the one used to create the outline
            #
            if dimensions == 2:
                strel_touching = strel_disk(distance + 0.5)
            else:
                strel_touching = skimage.morphology.ball(distance + 0.5)
            #
            # Get the extents for each object and calculate the patch
            # that excises the part of the image that is "distance"
            # away
            if dimensions == 2:
                i, j = numpy.mgrid[0:labels.shape[0], 0:labels.shape[1]]

                minimums_i, maximums_i, _, _ = scipy.ndimage.extrema(
                    i, labels, object_indexes)
                minimums_j, maximums_j, _, _ = scipy.ndimage.extrema(
                    j, labels, object_indexes)

                minimums_i = numpy.maximum(fix(minimums_i) - distance,
                                           0).astype(int)
                maximums_i = numpy.minimum(
                    fix(maximums_i) + distance + 1,
                    labels.shape[0]).astype(int)
                minimums_j = numpy.maximum(fix(minimums_j) - distance,
                                           0).astype(int)
                maximums_j = numpy.minimum(
                    fix(maximums_j) + distance + 1,
                    labels.shape[1]).astype(int)
            else:
                k, i, j = numpy.mgrid[0:labels.shape[0], 0:labels.shape[1],
                                      0:labels.shape[2]]

                minimums_k, maximums_k, _, _ = scipy.ndimage.extrema(
                    k, labels, object_indexes)
                minimums_i, maximums_i, _, _ = scipy.ndimage.extrema(
                    i, labels, object_indexes)
                minimums_j, maximums_j, _, _ = scipy.ndimage.extrema(
                    j, labels, object_indexes)

                minimums_k = numpy.maximum(fix(minimums_k) - distance,
                                           0).astype(int)
                maximums_k = numpy.minimum(
                    fix(maximums_k) + distance + 1,
                    labels.shape[0]).astype(int)
                minimums_i = numpy.maximum(fix(minimums_i) - distance,
                                           0).astype(int)
                maximums_i = numpy.minimum(
                    fix(maximums_i) + distance + 1,
                    labels.shape[1]).astype(int)
                minimums_j = numpy.maximum(fix(minimums_j) - distance,
                                           0).astype(int)
                maximums_j = numpy.minimum(
                    fix(maximums_j) + distance + 1,
                    labels.shape[2]).astype(int)
            #
            # Loop over all objects
            # Calculate which ones overlap "index"
            # Calculate how much overlap there is of others to "index"
            #
            for object_number in object_numbers:
                if object_number == 0:
                    #
                    # No corresponding object in small-removed. This means
                    # that the object has no pixels, e.g., not renumbered.
                    #
                    continue
                index = object_number - 1
                if dimensions == 2:
                    patch = labels[minimums_i[index]:maximums_i[index],
                                   minimums_j[index]:maximums_j[index], ]
                    npatch = neighbor_labels[
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ]
                else:
                    patch = labels[minimums_k[index]:maximums_k[index],
                                   minimums_i[index]:maximums_i[index],
                                   minimums_j[index]:maximums_j[index], ]
                    npatch = neighbor_labels[
                        minimums_k[index]:maximums_k[index],
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ]

                #
                # Find the neighbors
                #
                patch_mask = patch == (index + 1)
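                # Small structuring elements use a plain morphological
                # dilation; for larger ones an FFT convolution of the binary
                # mask is faster, and thresholding at 0.5 recovers the
                # dilated mask.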
                if distance <= 5:
                    extended = scipy.ndimage.binary_dilation(patch_mask, strel)
                else:
                    extended = (scipy.signal.fftconvolve(
                        patch_mask, strel, mode="same") > 0.5)
                neighbors = numpy.unique(npatch[extended])
                neighbors = neighbors[neighbors != 0]
                if self.neighbors_are_objects:
                    neighbors = neighbors[neighbors != object_number]
                nc = len(neighbors)
                neighbor_count[index] = nc
                if nc > 0:
                    first_objects.append(numpy.ones(nc, int) * object_number)
                    second_objects.append(neighbors)
                #
                # Find the # of overlapping pixels. Dilate the neighbors
                # and see how many pixels overlap our image. Use a 3x3
                # structuring element to expand the overlapping edge
                # into the perimeter.
                #
                if dimensions == 2:
                    outline_patch = (perimeter_outlines[
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ] == object_number
                                     )
                else:
                    outline_patch = (perimeter_outlines[
                        minimums_k[index]:maximums_k[index],
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ] == object_number
                                     )
                if self.neighbors_are_objects:
                    extendme = (patch != 0) & (patch != object_number)
                    if distance <= 5:
                        extended = scipy.ndimage.binary_dilation(
                            extendme, strel_touching)
                    else:
                        extended = (scipy.signal.fftconvolve(
                            extendme, strel_touching, mode="same") > 0.5)
                else:
                    if distance <= 5:
                        extended = scipy.ndimage.binary_dilation(
                            (npatch != 0), strel_touching)
                    else:
                        extended = (scipy.signal.fftconvolve(
                            (npatch != 0), strel_touching, mode="same") > 0.5)
                overlap = numpy.sum(outline_patch & extended)
                pixel_count[index] = overlap
            if sum([len(x) for x in first_objects]) > 0:
                first_objects = numpy.hstack(first_objects)
                reverse_object_numbers = numpy.zeros(
                    max(numpy.max(object_numbers), numpy.max(first_objects)) +
                    1, int)
                reverse_object_numbers[object_numbers] = (
                    numpy.arange(len(object_numbers)) + 1)
                first_objects = reverse_object_numbers[first_objects]

                second_objects = numpy.hstack(second_objects)
                reverse_neighbor_numbers = numpy.zeros(
                    max(numpy.max(neighbor_numbers), numpy.max(second_objects))
                    + 1, int)
                reverse_neighbor_numbers[neighbor_numbers] = (
                    numpy.arange(len(neighbor_numbers)) + 1)
                second_objects = reverse_neighbor_numbers[second_objects]
                to_keep = (first_objects > 0) & (second_objects > 0)
                first_objects = first_objects[to_keep]
                second_objects = second_objects[to_keep]
            else:
                first_objects = numpy.zeros(0, int)
                second_objects = numpy.zeros(0, int)
            percent_touching = pixel_count * 100 / perimeters
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            #
            # Have to recompute nearest
            #
            first_object_number = numpy.zeros(nkept_objects, int)
            second_object_number = numpy.zeros(nkept_objects, int)
            if nkept_objects > (1 if self.neighbors_are_objects else 0):
                di = (ocenters[object_indexes[:, numpy.newaxis], 0] -
                      ncenters[neighbor_indexes[numpy.newaxis, :], 0])
                dj = (ocenters[object_indexes[:, numpy.newaxis], 1] -
                      ncenters[neighbor_indexes[numpy.newaxis, :], 1])
                distance_matrix = numpy.sqrt(di * di + dj * dj)
                distance_matrix[~has_pixels, :] = numpy.inf
                distance_matrix[:, ~neighbor_has_pixels] = numpy.inf
                #
                # order[:,0] should be arange(nobjects)
                # order[:,1] should be the nearest neighbor
                # order[:,2] should be the next nearest neighbor
                #
                order = numpy.lexsort(
                    [distance_matrix]).astype(first_object_number.dtype)
                if self.neighbors_are_objects:
                    first_object_number[has_pixels] = order[has_pixels, 1] + 1
                    if nkept_objects > 2:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 2] + 1
                else:
                    first_object_number[has_pixels] = order[has_pixels, 0] + 1
                    if order.shape[1] > 1:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 1] + 1
        else:
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            first_objects = numpy.zeros(0, int)
            second_objects = numpy.zeros(0, int)
        #
        # Now convert all measurements from the small-removed to
        # the final number set.
        #
        neighbor_count = neighbor_count[object_indexes]
        neighbor_count[~has_pixels] = 0
        percent_touching = percent_touching[object_indexes]
        percent_touching[~has_pixels] = 0
        first_x_vector = first_x_vector[object_indexes]
        second_x_vector = second_x_vector[object_indexes]
        first_y_vector = first_y_vector[object_indexes]
        second_y_vector = second_y_vector[object_indexes]
        angle = angle[object_indexes]
        #
        # Record the measurements
        #
        assert isinstance(workspace, Workspace)
        m = workspace.measurements
        assert isinstance(m, Measurements)
        image_set = workspace.image_set
        features_and_data = [
            (M_NUMBER_OF_NEIGHBORS, neighbor_count),
            (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
            (
                M_FIRST_CLOSEST_DISTANCE,
                numpy.sqrt(first_x_vector**2 + first_y_vector**2),
            ),
            (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
            (
                M_SECOND_CLOSEST_DISTANCE,
                numpy.sqrt(second_x_vector**2 + second_y_vector**2),
            ),
            (M_ANGLE_BETWEEN_NEIGHBORS, angle),
            (M_PERCENT_TOUCHING, percent_touching),
        ]
        for feature_name, data in features_and_data:
            m.add_measurement(self.object_name.value,
                              self.get_measurement_name(feature_name), data)
        if len(first_objects) > 0:
            m.add_relate_measurement(
                self.module_num,
                NEIGHBORS,
                self.object_name.value,
                self.object_name.value
                if self.neighbors_are_objects else self.neighbors_name.value,
                m.image_set_number * numpy.ones(first_objects.shape, int),
                first_objects,
                m.image_set_number * numpy.ones(second_objects.shape, int),
                second_objects,
            )

        labels = kept_labels

        neighbor_count_image = numpy.zeros(labels.shape, int)
        object_mask = objects.segmented != 0
        object_indexes = objects.segmented[object_mask] - 1
        neighbor_count_image[object_mask] = neighbor_count[object_indexes]
        workspace.display_data.neighbor_count_image = neighbor_count_image

        percent_touching_image = numpy.zeros(labels.shape)
        percent_touching_image[object_mask] = percent_touching[object_indexes]
        workspace.display_data.percent_touching_image = percent_touching_image

        image_set = workspace.image_set
        if self.wants_count_image.value:
            neighbor_cm_name = self.count_colormap.value
            neighbor_cm = get_colormap(neighbor_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=neighbor_cm)
            img = sm.to_rgba(neighbor_count_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            count_image = Image(img, masking_objects=objects)
            image_set.add(self.count_image_name.value, count_image)
        else:
            neighbor_cm_name = "Blues"
            neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
        if self.wants_percent_touching_image:
            percent_touching_cm_name = self.touching_colormap.value
            percent_touching_cm = get_colormap(percent_touching_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=percent_touching_cm)
            img = sm.to_rgba(percent_touching_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            touching_image = Image(img, masking_objects=objects)
            image_set.add(self.touching_image_name.value, touching_image)
        else:
            percent_touching_cm_name = "Oranges"
            percent_touching_cm = matplotlib.cm.get_cmap(
                percent_touching_cm_name)

        if self.show_window:
            workspace.display_data.neighbor_cm_name = neighbor_cm_name
            workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.neighbor_labels = neighbor_labels
            workspace.display_data.expanded_labels = expanded_labels
            workspace.display_data.object_mask = object_mask
            workspace.display_data.dimensions = dimensions
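
A hedged, standalone sketch of the numpy.argpartition shortcut this run() uses to find the three closest neighbor centers of each object without fully sorting every row of distances. The centroids below are made up for illustration.

import numpy

ocenters = numpy.array([[0.0, 0.0], [1.0, 1.0]])  # object centroids (i, j)
ncenters = numpy.array([[5.0, 5.0], [0.0, 1.0],
                        [2.0, 2.0], [9.0, 9.0]])  # neighbor centroids
nneighbors = len(ncenters)
j = numpy.arange(nneighbors)
# Partially sort positions 0..2 only; each of these lands in its final
# sorted place, so the first three entries are the three nearest neighbors.
partition_keys = tuple(range(min(nneighbors, 3)))
for i in range(len(ocenters)):
    dr = numpy.sqrt((ocenters[i, 0] - ncenters[j, 0]) ** 2 +
                    (ocenters[i, 1] - ncenters[j, 1]) ** 2)
    print(i, numpy.argpartition(dr, partition_keys)[:3])
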