def run(self, workspace):
        image_name = self.image_name.value
        objects_name = self.objects_name.value
        image = workspace.image_set.get_image(image_name)
        pixel_data = image.pixel_data

        labels = workspace.interaction_request(
                self, pixel_data, workspace.measurements.image_set_number)
        if labels is None:
            # User cancelled. Soldier on as best we can.
            workspace.cancel_request()
            labels = np.zeros(pixel_data.shape[:2], int)
        objects = cpo.Objects()
        objects.segmented = labels
        workspace.object_set.add_objects(objects, objects_name)

        ##################
        #
        # Add measurements
        #
        m = workspace.measurements
        #
        # The object count
        #
        object_count = np.max(labels)
        I.add_object_count_measurements(m, objects_name, object_count)
        #
        # The object locations
        #
        I.add_object_location_measurements(m, objects_name, labels)

        workspace.display_data.labels = labels
        workspace.display_data.pixel_data = pixel_data
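
A note on the cancel fallback above: the replacement label matrix uses pixel_data.shape[:2] so it works for both grayscale and color input, and every downstream measurement then reports zero objects. A tiny self-contained illustration (the 128x128 RGB shape is only an example):

import numpy as np

# Hypothetical RGB input; shape[:2] keeps just the spatial dimensions.
color_pixels = np.zeros((128, 128, 3))
labels = np.zeros(color_pixels.shape[:2], int)

assert labels.shape == (128, 128)   # no channel axis in the label matrix
assert np.max(labels) == 0          # so the recorded object count is 0
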
 def run(self, workspace):
     statistics = []
     m = workspace.measurements
     assert isinstance(m, cpmeas.Measurements)
     image_set = workspace.image_set
     for file_setting in self.file_settings:
         wants_images = self.file_wants_images(file_setting)
         image_name = file_setting.image_name.value if wants_images else \
             file_setting.objects_name.value
         m_path, m_file, m_md5_digest, m_scaling, m_height, m_width = [
             "_".join((c, image_name)) for c in (
                 C_PATH_NAME if wants_images else C_OBJECTS_PATH_NAME,
                 C_FILE_NAME if wants_images else C_OBJECTS_FILE_NAME,
                 C_MD5_DIGEST, C_SCALING, C_HEIGHT, C_WIDTH)]
         pathname = m.get_current_image_measurement(m_path)
         filename = m.get_current_image_measurement(m_file)
         rescale = (wants_images and file_setting.rescale.value)
         
         provider = LoadImagesImageProvider(
             image_name, pathname, filename, rescale)
         image = provider.provide_image(image_set)
         pixel_data = image.pixel_data
         digest = hashlib.md5()
         if wants_images:
             digest.update(np.ascontiguousarray(pixel_data).data)
             m.add_image_measurement("_".join((C_MD5_DIGEST, image_name)), 
                                     digest.hexdigest())
             m.add_image_measurement("_".join((C_SCALING, image_name)),
                                      image.scale)
             m.add_image_measurement("_".join((C_HEIGHT, image_name)),
                                     int(pixel_data.shape[0]))
             m.add_image_measurement("_".join((C_WIDTH, image_name)),
                                     int(pixel_data.shape[1]))
             image_set.providers.append(provider)
         else:
             #
             # Turn image into objects
             #
             labels = convert_image_to_objects(pixel_data)
             objects = cpo.Objects()
             objects.segmented = labels
             object_set = workspace.object_set
             assert isinstance(object_set, cpo.ObjectSet)
             object_set.add_objects(objects, image_name)
             add_object_count_measurements(m, image_name, objects.count)
             add_object_location_measurements(m, image_name, labels)
             #
             # Add outlines if appropriate
             #
             if file_setting.wants_outlines:
                 outlines = cellprofiler.cpmath.outline.outline(labels)
                 outline_image = cpi.Image(outlines.astype(bool))
                 workspace.image_set.add(file_setting.outlines_name.value,
                                         outline_image)
         statistics += [(image_name, filename)]
     workspace.display_data.col_labels = ("Image name","File")
     workspace.display_data.statistics = statistics
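
The convert_image_to_objects helper used in the objects branch is not shown in these examples. A minimal sketch of the idea, assuming objects arrive either as distinct pixel values or as foreground regions on a black background (this is an illustration, not CellProfiler's actual implementation):

import numpy as np
import scipy.ndimage as scind

def convert_image_to_objects_sketch(pixel_data):
    """Return an integer label matrix derived from an image."""
    if pixel_data.ndim == 3:
        # Collapse color to one channel before looking for objects.
        pixel_data = pixel_data.mean(axis=2)
    unique_values = np.unique(pixel_data)
    if len(unique_values) <= 2:
        # Binary image: label connected foreground components.
        labels, _ = scind.label(pixel_data > 0)
        return labels
    # Otherwise treat each distinct nonzero value as its own object.
    return np.searchsorted(unique_values, pixel_data).astype(int)
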
Example #3

 def make_objects(self, workspace, labels, nworms):
     m = workspace.measurements
     assert isinstance(m, cpmeas.Measurements)
     object_set = workspace.object_set
     assert isinstance(object_set, cpo.ObjectSet)
     straightened_objects_name = self.straightened_objects_name.value
     straightened_objects = cpo.Objects()
     straightened_objects.segmented = labels
     object_set.add_objects(straightened_objects, straightened_objects_name)
     add_object_count_measurements(m, straightened_objects_name, nworms)
     add_object_location_measurements(m, straightened_objects_name,
                                      labels, nworms)
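
The add_object_count_measurements and add_object_location_measurements helpers that appear throughout these examples come from CellProfiler's identify module. A hedged sketch of what they record, using the same Measurements calls visible above; the exact feature-name strings (Count_, Location_Center_X/Y) and the centroid computation are assumptions about the convention, not a copy of the library code:

import numpy as np
import scipy.ndimage as scind

def add_object_count_measurements_sketch(m, objects_name, object_count):
    # One per-image value: how many objects were found for this object set.
    m.add_image_measurement("Count_%s" % objects_name, object_count)

def add_object_location_measurements_sketch(m, objects_name, labels):
    # One (x, y) centroid per object, computed from the label matrix.
    indices = np.arange(1, np.max(labels) + 1)
    if len(indices) == 0:
        centers = np.zeros((0, 2))
    else:
        centers = np.array(scind.center_of_mass(
            np.ones_like(labels), labels, indices), ndmin=2)
    m.add_measurement(objects_name, "Location_Center_Y", centers[:, 0])
    m.add_measurement(objects_name, "Location_Center_X", centers[:, 1])
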
    def run(self, workspace):
        image_name    = self.image_name.value
        objects_name  = self.objects_name.value
        outlines_name = self.outlines_name.value
        image         = workspace.image_set.get_image(image_name)
        pixel_data    = image.pixel_data
        
        labels = np.zeros(pixel_data.shape[:2], int)
        self.do_ui(workspace, pixel_data, labels)
        objects = cpo.Objects()
        objects.segmented = labels
        workspace.object_set.add_objects(objects, objects_name)

        ##################
        #
        # Add measurements
        #
        m = workspace.measurements
        #
        # The object count
        #
        object_count = np.max(labels)
        I.add_object_count_measurements(m, objects_name, object_count)
        #
        # The object locations
        #
        I.add_object_location_measurements(m, objects_name, labels)
        #
        # Outlines if we want them
        #
        if self.wants_outlines:
            outlines_name = self.outlines_name.value
            outlines = outline(labels)
            outlines_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(outlines_name, outlines_image)
        #
        # Do the drawing here
        #
        if workspace.frame is not None:
            figure = workspace.create_or_find_figure(
                title="IdentifyObjectsManually, image cycle #%d" % (
                    workspace.measurements.image_set_number),
                subplots=(2, 1))
            figure.subplot_imshow_labels(0, 0, labels, objects_name)
            figure.subplot_imshow(1, 0, self.draw_outlines(pixel_data, labels),
                                  sharex = figure.subplot(0,0),
                                  sharey = figure.subplot(0,0))
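
The figure code above overlays object boundaries on the input image via self.draw_outlines, and the later IdentifySecondaryObjects examples do the same thing with np.dstack plus a draw_outline call. A rough sketch of that overlay (the fixed green color is an assumption; the real modules take outline colors from preferences):

import numpy as np
from centrosome.outline import outline  # same outline helper the newer examples import

def draw_outlines_sketch(pixel_data, labels, color=(0.0, 1.0, 0.0)):
    """Return an RGB copy of pixel_data with object outlines painted in."""
    if pixel_data.ndim == 2:
        rgb = np.dstack((pixel_data, pixel_data, pixel_data)).astype(float)
    else:
        rgb = pixel_data.astype(float).copy()
    boundary = outline(labels) > 0
    for channel, value in enumerate(color):
        rgb[boundary, channel] = value
    return rgb
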
    def run(self, workspace):
        image_name    = self.image_name.value
        objects_name  = self.objects_name.value
        outlines_name = self.outlines_name.value
        image         = workspace.image_set.get_image(image_name)
        pixel_data    = image.pixel_data
        
        labels = workspace.interaction_request(
            self, pixel_data, workspace.measurements.image_set_number)
        objects = cpo.Objects()
        objects.segmented = labels
        workspace.object_set.add_objects(objects, objects_name)

        ##################
        #
        # Add measurements
        #
        m = workspace.measurements
        #
        # The object count
        #
        object_count = np.max(labels)
        I.add_object_count_measurements(m, objects_name, object_count)
        #
        # The object locations
        #
        I.add_object_location_measurements(m, objects_name, labels)
        #
        # Outlines if we want them
        #
        if self.wants_outlines:
            outlines_name = self.outlines_name.value
            outlines = outline(labels)
            outlines_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(outlines_name, outlines_image)

        workspace.display_data.labels = labels
        workspace.display_data.pixel_data = pixel_data
    def run(self, workspace):
        image_name = self.image_name.value
        objects_name = self.objects_name.value
        outlines_name = self.outlines_name.value
        image = workspace.image_set.get_image(image_name)
        pixel_data = image.pixel_data

        labels = workspace.interaction_request(self, pixel_data)
        objects = cpo.Objects()
        objects.segmented = labels
        workspace.object_set.add_objects(objects, objects_name)

        ##################
        #
        # Add measurements
        #
        m = workspace.measurements
        #
        # The object count
        #
        object_count = np.max(labels)
        I.add_object_count_measurements(m, objects_name, object_count)
        #
        # The object locations
        #
        I.add_object_location_measurements(m, objects_name, labels)
        #
        # Outlines if we want them
        #
        if self.wants_outlines:
            outlines_name = self.outlines_name.value
            outlines = outline(labels)
            outlines_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(outlines_name, outlines_image)

        workspace.display_data.labels = labels
        workspace.display_data.pixel_data = pixel_data
    def run(self, workspace):
        statistics = []
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        #
        # Hack: if LoadSingleImage is first, no paths are populated
        #
        if self.file_wants_images(self.file_settings[0]):
            m_path = "_".join((C_PATH_NAME, self.file_settings[0].image_name.value))
        else:
            m_path = "_".join((C_OBJECTS_PATH_NAME, self.file_settings[0].objects_name.value))
        if m.get_current_image_measurement(m_path) is None:
            self.prepare_run(workspace)

        image_set = workspace.image_set
        for file_setting in self.file_settings:
            wants_images = self.file_wants_images(file_setting)
            image_name = file_setting.image_name.value if wants_images else file_setting.objects_name.value
            m_path, m_file, m_md5_digest, m_scaling, m_height, m_width = [
                "_".join((c, image_name))
                for c in (
                    C_PATH_NAME if wants_images else C_OBJECTS_PATH_NAME,
                    C_FILE_NAME if wants_images else C_OBJECTS_FILE_NAME,
                    C_MD5_DIGEST,
                    C_SCALING,
                    C_HEIGHT,
                    C_WIDTH,
                )
            ]
            pathname = m.get_current_image_measurement(m_path)
            filename = m.get_current_image_measurement(m_file)
            rescale = wants_images and file_setting.rescale.value

            provider = LoadImagesImageProvider(image_name, pathname, filename, rescale)
            image = provider.provide_image(image_set)
            pixel_data = image.pixel_data
            if wants_images:
                md5 = provider.get_md5_hash(m)
                m.add_image_measurement("_".join((C_MD5_DIGEST, image_name)), md5)
                m.add_image_measurement("_".join((C_SCALING, image_name)), image.scale)
                m.add_image_measurement("_".join((C_HEIGHT, image_name)), int(pixel_data.shape[0]))
                m.add_image_measurement("_".join((C_WIDTH, image_name)), int(pixel_data.shape[1]))
                image_set.providers.append(provider)
            else:
                #
                # Turn image into objects
                #
                labels = convert_image_to_objects(pixel_data)
                objects = cpo.Objects()
                objects.segmented = labels
                object_set = workspace.object_set
                assert isinstance(object_set, cpo.ObjectSet)
                object_set.add_objects(objects, image_name)
                add_object_count_measurements(m, image_name, objects.count)
                add_object_location_measurements(m, image_name, labels)
                #
                # Add outlines if appropriate
                #
                if file_setting.wants_outlines:
                    outlines = centrosome.outline.outline(labels)
                    outline_image = cpi.Image(outlines.astype(bool))
                    workspace.image_set.add(file_setting.outlines_name.value, outline_image)
            statistics += [(image_name, filename)]
        workspace.display_data.col_labels = ("Image name", "File")
        workspace.display_data.statistics = statistics
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image = workspace.image_set.get_image(self.image_name.value,
                                              must_be_grayscale = True)
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        elif self.threshold_method == cpthresh.TM_BINARY_IMAGE:
            binary_image = workspace.image_set.get_image(self.binary_image.value,
                                                         must_be_binary = True)
            local_threshold = np.ones(img.shape) * np.max(img) + np.finfo(float).eps
            local_threshold[binary_image.pixel_data] = np.min(img) - np.finfo(float).eps
            global_threshold = cellprofiler.cpmath.otsu.otsu(img[mask],
                        self.threshold_range.min,
                        self.threshold_range.max)
            has_threshold = True
        else:
            local_threshold,global_threshold = self.get_threshold(img, mask, None, workspace)
            has_threshold = True
        
        if has_threshold:
            thresholded_image = img > local_threshold
        
        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0,:], labels_in[-1,:], labels_in[:,0], labels_in[:,-1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in)+1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]
        
        labels_in[(~ is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp
        
        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances,(i,j) = scind.distance_transform_edt(labels_in == 0, 
                                                               return_indices = True)
                labels_out = np.zeros(labels_in.shape,int)
                dilate_mask = distances <= self.distance_to_dilate.value 
                labels_out[dilate_mask] =\
                    labels_in[i[dilate_mask],j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in, 
                                                  thresholded_image,
                                                  1.0)
                labels_out[distances>self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in>0] 
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            # 
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in, 
                                             thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image, 
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1-img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img, 
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                                objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out,
                                   objects.segmented,
                                   range(np.max(objects.segmented)+1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image = image)
                workspace.image_set.add(self.new_primary_outlines_name.value, 
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out) 
        if workspace.frame is not None:
            object_area = np.sum(segmented_out > 0)
            object_pct = 100 * object_area / np.product(segmented_out.shape)
                
            my_frame = workspace.create_or_find_figure(
                title="IdentifySecondaryObjects, image cycle #%d" % (
                    workspace.measurements.image_set_number),
                subplots=(2, 2))
            title = "Input image, cycle #%d"%(workspace.image_set.number+1)
            my_frame.subplot_imshow_grayscale(0, 0, img, title)
            my_frame.subplot_imshow_labels(1, 0, segmented_out, "Labeled image",
                                           sharex = my_frame.subplot(0,0),
                                           sharey = my_frame.subplot(0,0))

            outline_img = np.dstack((img, img, img))
            cpmi.draw_outline(outline_img, secondary_outline > 0,
                              cpprefs.get_secondary_outline_color())
            my_frame.subplot_imshow(0, 1, outline_img, "Outlined image",
                                    normalize=False,
                                    sharex = my_frame.subplot(0,0),
                                    sharey = my_frame.subplot(0,0))
            
            primary_img = np.dstack((img, img, img))
            cpmi.draw_outline(primary_img, primary_outline > 0,
                              cpprefs.get_primary_outline_color())
            cpmi.draw_outline(primary_img, secondary_outline > 0,
                              cpprefs.get_secondary_outline_color())
            my_frame.subplot_imshow(1, 1, primary_img,
                                    "Primary and output outlines",
                                    normalize=False,
                                    sharex = my_frame.subplot(0,0),
                                    sharey = my_frame.subplot(0,0))
            if global_threshold is not None:
                my_frame.status_bar.SetFields(
                    ["Threshold: %.3f" % global_threshold,
                     "Area covered by objects: %.1f %%" % object_pct])
            else:
                my_frame.status_bar.SetFields(
                    ["Area covered by objects: %.1f %%" % object_pct])
        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image = image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add the background measurements if made
        #
        measurements = workspace.measurements
        if has_threshold:
            if isinstance(local_threshold,np.ndarray):
                ave_threshold = np.mean(local_threshold)
            else:
                ave_threshold = local_threshold
            
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_FINAL_THRESHOLD%(objname),
                                         np.array([ave_threshold],
                                                     dtype=float))
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_ORIG_THRESHOLD%(objname),
                                         np.array([global_threshold],
                                                      dtype=float))
            wv = cpthresh.weighted_variance(img, mask, local_threshold)
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_WEIGHTED_VARIANCE%(objname),
                                         np.array([wv],dtype=float))
            entropies = cpthresh.sum_of_entropies(img, mask, local_threshold)
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_SUM_OF_ENTROPIES%(objname),
                                         np.array([entropies],dtype=float))
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT%objname,
                                     children_per_parent)
        measurements.add_measurement(objname,
                                     cpmi.FF_PARENT%self.primary_objects.value,
                                     parents_of_children)
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(new_objects,
                                             self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(measurements,
                                               self.new_primary_objects_name.value,
                                               np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(measurements,
                                                  self.new_primary_objects_name.value,
                                                  new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value,
                 new_objects, self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value,
                 objects_out, objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(parent_name,
                                             cpmi.FF_CHILDREN_COUNT%child_name,
                                             children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT%parent_name,
                                             parents_of_children)
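
The "labels touching the edge" block near the top of this method builds a boolean lookup table indexed by label number, then indexes it with the label matrix itself to get a per-pixel mask. The same idiom in isolation, on a toy label matrix:

import numpy as np

labels_in = np.array([[1, 1, 0, 0],
                      [0, 0, 0, 0],
                      [0, 2, 2, 0],
                      [0, 0, 0, 3]])

# Collect every label value that appears on the image border.
border = np.hstack((labels_in[0, :], labels_in[-1, :],
                    labels_in[:, 0], labels_in[:, -1]))
is_touching = np.zeros(labels_in.max() + 1, bool)
is_touching[np.unique(border)] = True

# Index the lookup table with the label matrix: every pixel of labels 1 and 3
# (and of the background, label 0) is marked; the interior label 2 is not.
touching_mask = is_touching[labels_in]
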
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image = workspace.image_set.get_image(self.image_name.value,
                                              must_be_grayscale=True)
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        elif self.threshold_method == cpthresh.TM_BINARY_IMAGE:
            binary_image = workspace.image_set.get_image(
                self.binary_image.value, must_be_binary=True)
            local_threshold = np.ones(
                img.shape) * np.max(img) + np.finfo(float).eps
            local_threshold[
                binary_image.pixel_data] = np.min(img) - np.finfo(float).eps
            global_threshold = cellprofiler.cpmath.otsu.otsu(
                img[mask], self.threshold_range.min, self.threshold_range.max)
            has_threshold = True
        else:
            local_threshold, global_threshold = self.get_threshold(
                img, mask, None, workspace)
            has_threshold = True

        if has_threshold:
            thresholded_image = img > local_threshold

        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0, :], labels_in[-1, :],
             labels_in[:, 0], labels_in[:, -1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in) + 1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]

        labels_in[(~is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp

        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances, (i, j) = scind.distance_transform_edt(
                    labels_in == 0, return_indices=True)
                labels_out = np.zeros(labels_in.shape, int)
                dilate_mask = distances <= self.distance_to_dilate.value
                labels_out[dilate_mask] = \
                    labels_in[i[dilate_mask], j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in,
                                                  thresholded_image, 1.0)
                labels_out[distances > self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in > 0]
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            #
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in, thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1 - img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out, objects.segmented,
                                   range(np.max(objects.segmented) + 1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image=image)
                workspace.image_set.add(self.new_primary_outlines_name.value,
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out)
        if workspace.frame is not None:
            object_area = np.sum(segmented_out > 0)
            object_pct = 100 * object_area / np.product(segmented_out.shape)

            my_frame = workspace.create_or_find_figure(
                title="IdentifySecondaryObjects, image cycle #%d" %
                (workspace.measurements.image_set_number),
                subplots=(2, 2))
            title = "Input image, cycle #%d" % (workspace.image_set.number + 1)
            my_frame.subplot_imshow_grayscale(0, 0, img, title)
            my_frame.subplot_imshow_labels(1,
                                           0,
                                           segmented_out,
                                           "Labeled image",
                                           sharex=my_frame.subplot(0, 0),
                                           sharey=my_frame.subplot(0, 0))

            outline_img = np.dstack((img, img, img))
            cpmi.draw_outline(outline_img, secondary_outline > 0,
                              cpprefs.get_secondary_outline_color())
            my_frame.subplot_imshow(0,
                                    1,
                                    outline_img,
                                    "Outlined image",
                                    normalize=False,
                                    sharex=my_frame.subplot(0, 0),
                                    sharey=my_frame.subplot(0, 0))

            primary_img = np.dstack((img, img, img))
            cpmi.draw_outline(primary_img, primary_outline > 0,
                              cpprefs.get_primary_outline_color())
            cpmi.draw_outline(primary_img, secondary_outline > 0,
                              cpprefs.get_secondary_outline_color())
            my_frame.subplot_imshow(1,
                                    1,
                                    primary_img,
                                    "Primary and output outlines",
                                    normalize=False,
                                    sharex=my_frame.subplot(0, 0),
                                    sharey=my_frame.subplot(0, 0))
            if global_threshold is not None:
                my_frame.status_bar.SetFields([
                    "Threshold: %.3f" % global_threshold,
                    "Area covered by objects: %.1f %%" % object_pct
                ])
            else:
                my_frame.status_bar.SetFields(
                    ["Area covered by objects: %.1f %%" % object_pct])
        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image=image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add the background measurements if made
        #
        measurements = workspace.measurements
        if has_threshold:
            if isinstance(local_threshold, np.ndarray):
                ave_threshold = np.mean(local_threshold)
            else:
                ave_threshold = local_threshold

            measurements.add_measurement(
                cpmeas.IMAGE, cpmi.FF_FINAL_THRESHOLD % (objname),
                np.array([ave_threshold], dtype=float))
            measurements.add_measurement(
                cpmeas.IMAGE, cpmi.FF_ORIG_THRESHOLD % (objname),
                np.array([global_threshold], dtype=float))
            wv = cpthresh.weighted_variance(img, mask, local_threshold)
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_WEIGHTED_VARIANCE % (objname),
                                         np.array([wv], dtype=float))
            entropies = cpthresh.sum_of_entropies(img, mask, local_threshold)
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_SUM_OF_ENTROPIES % (objname),
                                         np.array([entropies], dtype=float))
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT % objname,
                                     children_per_parent)
        measurements.add_measurement(
            objname, cpmi.FF_PARENT % self.primary_objects.value,
            parents_of_children)
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(
                new_objects, self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(
                measurements, self.new_primary_objects_name.value,
                np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(
                measurements, self.new_primary_objects_name.value,
                new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value, new_objects,
                 self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value, objects_out,
                 objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(
                    parent_name, cpmi.FF_CHILDREN_COUNT % child_name,
                    children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT % parent_name,
                                             parents_of_children)
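
The wants_discard_edge branch above renumbers the surviving objects with a lookup table: scind.maximum finds, for each primary label, the largest secondary label overlapping it, and the nonzero entries are then renumbered consecutively. The same compaction trick in isolation (fix() in the original is CellProfiler's wrapper that turns the scipy result into an array; np.array stands in for it here):

import numpy as np
import scipy.ndimage as scind

primary = np.array([[1, 1, 0, 2],
                    [0, 0, 0, 2],
                    [3, 3, 0, 0]])
# Pretend filtering removed object 2 entirely from the secondary labels.
secondary = np.array([[1, 1, 0, 0],
                      [0, 0, 0, 0],
                      [3, 3, 3, 0]])

# For each primary label, the largest overlapping secondary label;
# zero means that primary object no longer has a secondary partner.
lookup = np.array(scind.maximum(secondary, primary,
                                range(primary.max() + 1)), int)
lookup[0] = 0
lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1

relabeled_primary = lookup[primary]      # objects 1 and 3 become 1 and 2
relabeled_secondary = lookup[secondary]  # same renumbering applied to both
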
Example #10
 def run(self, workspace):
     statistics = []
     m = workspace.measurements
     assert isinstance(m, cpmeas.Measurements)
     #
     # Hack: if LoadSingleImage is first, no paths are populated
     # 
     if self.file_wants_images(self.file_settings[0]):
         m_path = "_".join((C_PATH_NAME, 
                            self.file_settings[0].image_name.value))
     else:
         m_path = "_".join((C_OBJECTS_PATH_NAME,
                            self.file_settings[0].objects_name.value))
     if m.get_current_image_measurement(m_path) is None:
         self.prepare_run(workspace)
         
     image_set = workspace.image_set
     for file_setting in self.file_settings:
         wants_images = self.file_wants_images(file_setting)
         image_name = file_setting.image_name.value if wants_images else \
             file_setting.objects_name.value
         m_path, m_file, m_md5_digest, m_scaling, m_height, m_width = [
             "_".join((c, image_name)) for c in (
                 C_PATH_NAME if wants_images else C_OBJECTS_PATH_NAME,
                 C_FILE_NAME if wants_images else C_OBJECTS_FILE_NAME,
                 C_MD5_DIGEST, C_SCALING, C_HEIGHT, C_WIDTH)]
         pathname = m.get_current_image_measurement(m_path)
         filename = m.get_current_image_measurement(m_file)
         rescale = (wants_images and file_setting.rescale.value)
         
         provider = LoadImagesImageProvider(
             image_name, pathname, filename, rescale)
         image = provider.provide_image(image_set)
         pixel_data = image.pixel_data
         digest = hashlib.md5()
         if wants_images:
             digest.update(np.ascontiguousarray(pixel_data).data)
             m.add_image_measurement("_".join((C_MD5_DIGEST, image_name)), 
                                     digest.hexdigest())
             m.add_image_measurement("_".join((C_SCALING, image_name)),
                                      image.scale)
             m.add_image_measurement("_".join((C_HEIGHT, image_name)),
                                     int(pixel_data.shape[0]))
             m.add_image_measurement("_".join((C_WIDTH, image_name)),
                                     int(pixel_data.shape[1]))
             image_set.providers.append(provider)
         else:
             #
             # Turn image into objects
             #
             labels = convert_image_to_objects(pixel_data)
             objects = cpo.Objects()
             objects.segmented = labels
             object_set = workspace.object_set
             assert isinstance(object_set, cpo.ObjectSet)
             object_set.add_objects(objects, image_name)
             add_object_count_measurements(m, image_name, objects.count)
             add_object_location_measurements(m, image_name, labels)
             #
             # Add outlines if appropriate
             #
             if file_setting.wants_outlines:
                 outlines = cellprofiler.cpmath.outline.outline(labels)
                 outline_image = cpi.Image(outlines.astype(bool))
                 workspace.image_set.add(file_setting.outlines_name.value,
                                         outline_image)
         statistics += [(image_name, filename)]
     workspace.display_data.col_labels = ("Image name","File")
     workspace.display_data.statistics = statistics
Example #11
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image_name = self.image_name.value
        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale=True)
        workspace.display_data.statistics = []
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        else:
            thresholded_image = self.threshold_image(image_name, workspace)
            has_threshold = True

        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0, :], labels_in[-1, :],
             labels_in[:, 0], labels_in[:, -1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in) + 1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]

        labels_in[(~is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp

        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances, (i, j) = scind.distance_transform_edt(
                    labels_in == 0, return_indices=True)
                labels_out = np.zeros(labels_in.shape, int)
                dilate_mask = distances <= self.distance_to_dilate.value
                labels_out[dilate_mask] = \
                    labels_in[i[dilate_mask], j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in,
                                                  thresholded_image, 1.0)
                labels_out[distances > self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in > 0]
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            #
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in, thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1 - img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out, objects.segmented,
                                   range(np.max(objects.segmented) + 1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image=image)
                workspace.image_set.add(self.new_primary_outlines_name.value,
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out)

        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image=image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add measurements
        #
        measurements = workspace.measurements
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT % objname,
                                     children_per_parent)
        measurements.add_measurement(
            objname, cpmi.FF_PARENT % self.primary_objects.value,
            parents_of_children)
        image_numbers = np.ones(len(parents_of_children), int) *\
            measurements.image_set_number
        mask = parents_of_children > 0
        measurements.add_relate_measurement(
            self.module_num, R_PARENT, self.primary_objects.value,
            self.objects_name.value, image_numbers[mask],
            parents_of_children[mask], image_numbers[mask],
            np.arange(1,
                      len(parents_of_children) + 1)[mask])
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(
                new_objects, self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(
                measurements, self.new_primary_objects_name.value,
                np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(
                measurements, self.new_primary_objects_name.value,
                new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value, new_objects,
                 self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value, objects_out,
                 objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(
                    parent_name, cpmi.FF_CHILDREN_COUNT % child_name,
                    children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT % parent_name,
                                             parents_of_children)
        if self.show_window:
            object_area = np.sum(segmented_out > 0)
            workspace.display_data.object_pct = \
                100 * object_area / np.product(segmented_out.shape)
            workspace.display_data.img = img
            workspace.display_data.segmented_out = segmented_out
            workspace.display_data.primary_labels = objects.segmented
            workspace.display_data.global_threshold = global_threshold
            workspace.display_data.object_count = object_count
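
The M_WATERSHED_I branch seeds a watershed of the inverted intensity image with the primary labels, restricted to pixels that are above threshold or already labeled, and inside the image mask. Roughly the same idea expressed with scikit-image, as a hedged sketch (skimage's watershed stands in for the one these modules import):

import numpy as np
from skimage.segmentation import watershed

def secondary_by_intensity_watershed(img, primary_labels, thresholded_image, mask):
    # Secondary objects may grow anywhere above threshold or already labeled,
    # but never outside the image mask.
    watershed_mask = (thresholded_image | (primary_labels > 0)) & mask
    # Invert so bright object interiors flood first and the boundary
    # competition happens near the threshold.
    inverted_img = 1.0 - img
    return watershed(inverted_img, markers=primary_labels,
                     connectivity=np.ones((3, 3), bool), mask=watershed_mask)
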
Example #12
 def run(self, workspace):
     dict = self.get_file_names(workspace)
     root = self.get_base_directory(workspace)
     statistics = [("Image name","File")]
     m = workspace.measurements
     for image_name in dict.keys():
         file_settings = self.get_file_settings(image_name)
         rescale = (file_settings.image_objects_choice == IO_IMAGES and
                    file_settings.rescale)
         provider = LoadImagesImageProvider(
             image_name, root, dict[image_name], rescale)
         image = provider.provide_image(workspace.image_set)
         pixel_data = image.pixel_data
         if file_settings.image_objects_choice == IO_IMAGES:
             workspace.image_set.providers.append(provider)
             #
             # Add measurements
             #
             path_name_category = C_PATH_NAME
             file_name_category = C_FILE_NAME
             digest = hashlib.md5()
             digest.update(np.ascontiguousarray(pixel_data).data)
             m.add_image_measurement("_".join((C_MD5_DIGEST, image_name)), 
                                     digest.hexdigest())
             m.add_image_measurement("_".join((C_SCALING, image_name)),
                                     image.scale)
             m.add_image_measurement("_".join((C_HEIGHT, image_name)),
                                         int(pixel_data.shape[0]))
             m.add_image_measurement("_".join((C_WIDTH, image_name)),
                                         int(pixel_data.shape[1]))
         else:
             #
             # Turn image into objects
             #
             labels = convert_image_to_objects(pixel_data)
             objects = cpo.Objects()
             objects.segmented = labels
             object_set = workspace.object_set
             assert isinstance(object_set, cpo.ObjectSet)
             object_set.add_objects(objects, image_name)
             #
             # Add measurements
             #
             add_object_count_measurements(m, image_name, objects.count)
             add_object_location_measurements(m, image_name, labels)
             path_name_category = C_OBJECTS_PATH_NAME
             file_name_category = C_OBJECTS_FILE_NAME
             #
             # Add outlines if appropriate
             #
             if file_settings.wants_outlines:
                 outlines = cellprofiler.cpmath.outline.outline(labels)
                 outline_image = cpi.Image(outlines.astype(bool))
                 workspace.image_set.add(file_settings.outlines_name.value,
                                         outline_image)
             
         m.add_image_measurement(file_name_category + '_' + image_name, 
                                 dict[image_name])
         m.add_image_measurement(path_name_category + '_' + image_name, root)
             
         statistics += [(image_name, dict[image_name])]
     if workspace.frame:
         title = "Load single image: image cycle # %d" % (
             workspace.measurements.image_set_number + 1)
         figure = workspace.create_or_find_figure(
             title="LoadSingleImage, image cycle #%d" %
             (workspace.measurements.image_set_number),
             subplots=(1, 1))
         figure.subplot_table(0, 0, statistics)
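
The MD5, height and width measurements recorded in the loop above can be reproduced on a bare NumPy array; a small self-contained illustration (the array below is a stand-in, not a real image):

import hashlib
import numpy as np

pixel_data = np.linspace(0, 1, 16).reshape(4, 4)   # stand-in image
digest = hashlib.md5(np.ascontiguousarray(pixel_data).data).hexdigest()
height, width = int(pixel_data.shape[0]), int(pixel_data.shape[1])
print(digest, height, width)
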
    def run(self, workspace):
        """Run the module
        
        workspace    - The workspace contains
            pipeline     - instance of cpp for this run
            image_set    - the images in the image set being processed
            object_set   - the objects (labeled masks) in this image set
            measurements - the measurements for this run
            frame        - the parent frame to whatever frame is created. None means don't draw.
        """
        orig_objects_name = self.object_name.value
        filtered_objects_name = self.filtered_objects.value
        
        orig_objects = workspace.object_set.get_objects(orig_objects_name)
        assert isinstance(orig_objects, cpo.Objects)
        orig_labels = orig_objects.segmented
        mask = orig_labels != 0

        if workspace.frame is None:
            # Accept the labels as-is
            filtered_labels = orig_labels
        else:
            filtered_labels = self.filter_objects(workspace, orig_labels)
        #
        # Renumber objects consecutively if asked to do so
        #
        unique_labels = np.unique(filtered_labels)
        unique_labels = unique_labels[unique_labels != 0]
        object_count = len(unique_labels)
        if self.renumber_choice == R_RENUMBER:
            mapping = np.zeros(1 if len(unique_labels) == 0 else np.max(unique_labels)+1, int)
            mapping[unique_labels] = np.arange(1,object_count + 1)
            filtered_labels = mapping[filtered_labels]
        #
        # Make the objects out of the labels
        #
        filtered_objects = cpo.Objects()
        filtered_objects.segmented = filtered_labels
        filtered_objects.unedited_segmented = orig_objects.unedited_segmented
        filtered_objects.parent_image = orig_objects.parent_image
        workspace.object_set.add_objects(filtered_objects, 
                                         filtered_objects_name)
        #
        # Add parent/child & other measurements
        #
        m = workspace.measurements
        child_count, parents = orig_objects.relate_children(filtered_objects)
        m.add_measurement(filtered_objects_name,
                          I.FF_PARENT%(orig_objects_name),
                          parents)
        m.add_measurement(orig_objects_name,
                          I.FF_CHILDREN_COUNT%(filtered_objects_name),
                          child_count)
        #
        # The object count
        #
        I.add_object_count_measurements(m, filtered_objects_name,
                                        object_count)
        #
        # The object locations
        #
        I.add_object_location_measurements(m, filtered_objects_name,
                                           filtered_labels)
        #
        # Outlines if we want them
        #
        if self.wants_outlines:
            outlines_name = self.outlines_name.value
            outlines = outline(filtered_labels)
            outlines_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(outlines_name, outlines_image)
        #
        # Do the drawing here
        #
        if workspace.frame is not None:
            figure = workspace.create_or_find_figure(title="EditObjectsManually, image cycle #%d"%(
                workspace.measurements.image_set_number),subplots=(2,1))
            figure.subplot_imshow_labels(0, 0, orig_labels, orig_objects_name)
            figure.subplot_imshow_labels(1, 0, filtered_labels,
                                         filtered_objects_name,
                                         sharex = figure.subplot(0,0),
                                         sharey = figure.subplot(0,0))
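
The consecutive-renumbering step in the example above is a lookup-table relabel: build a mapping indexed by old label, then index it with the whole label matrix. A tiny worked example:

import numpy as np

filtered_labels = np.array([[0, 3, 3],
                            [7, 0, 3]])
unique_labels = np.unique(filtered_labels)
unique_labels = unique_labels[unique_labels != 0]              # [3, 7]
mapping = np.zeros(filtered_labels.max() + 1, int)
mapping[unique_labels] = np.arange(1, len(unique_labels) + 1)
print(mapping[filtered_labels])                                # 3 -> 1, 7 -> 2
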
Example #14
    def run(self, workspace):
        """Run the module on the current data set
        
        workspace - has the current image set, object set, measurements
                    and the parent frame for the application if the module
                    is allowed to display. If the module should not display,
                    workspace.frame is None.
        """
        #
        # The object set holds "objects". Each of these is a container
        # for holding up to three kinds of image labels.
        #
        object_set = workspace.object_set
        #
        # Get the primary objects (the centers to be removed).
        # Get the string value out of primary_object_name.
        #
        primary_objects = object_set.get_objects(
            self.primary_objects_name.value)
        #
        # Get the cleaned-up labels image
        #
        primary_labels = primary_objects.segmented
        #
        # Do the same with the secondary object
        secondary_objects = object_set.get_objects(
            self.secondary_objects_name.value)
        secondary_labels = secondary_objects.segmented
        #
        # If one of the two label images is smaller than the other, we
        # try to find the cropping mask and we apply that mask to the larger
        #
        try:
            if any([
                    p_size < s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                #
                # Look for a cropping mask associated with the primary_labels
                # and apply that mask to resize the secondary labels
                #
                secondary_labels = primary_objects.crop_image_similarly(
                    secondary_labels)
                tertiary_image = primary_objects.parent_image
            elif any([
                    p_size > s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                primary_labels = secondary_objects.crop_image_similarly(
                    primary_labels)
                tertiary_image = secondary_objects.parent_image
            elif secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
        except ValueError:
            # No suitable cropping - resize all to fit the secondary
            # labels which are the most critical.
            #
            primary_labels, _ = cpo.size_similarly(secondary_labels,
                                                   primary_labels)
            if secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
                if tertiary_image is not None:
                    tertiary_image, _ = cpo.size_similarly(
                        secondary_labels, tertiary_image)
        #
        # Find the outlines of the primary image and use this to shrink the
        # primary image by one. This guarantees that there is something left
        # of the secondary image after subtraction
        #
        primary_outline = outline(primary_labels)
        tertiary_labels = secondary_labels.copy()
        primary_mask = np.logical_or(primary_labels == 0, primary_outline)
        tertiary_labels[~primary_mask] = 0
        #
        # Get the outlines of the tertiary image
        #
        tertiary_outlines = outline(tertiary_labels) != 0
        #
        # Make the tertiary objects container
        #
        tertiary_objects = cpo.Objects()
        tertiary_objects.segmented = tertiary_labels
        tertiary_objects.parent_image = tertiary_image
        #
        # Relate tertiary objects to their parents & record
        #
        child_count_of_secondary, secondary_parents = \
            secondary_objects.relate_children(tertiary_objects)
        child_count_of_primary, primary_parents = \
            primary_objects.relate_children(tertiary_objects)

        if workspace.frame is not None:
            import cellprofiler.gui.cpfigure as cpf
            #
            # Draw the primary, secondary and tertiary labels
            # and the outlines
            #
            window_name = "CellProfiler:%s:%d" % (self.module_name,
                                                  self.module_num)
            my_frame = cpf.create_or_find(
                workspace.frame,
                title="IdentifyTertiaryObjects, image cycle #%d" %
                (workspace.measurements.image_set_number),
                name=window_name,
                subplots=(2, 2))

            title = "%s, cycle # %d" % (self.primary_objects_name.value,
                                        workspace.image_set.number + 1)
            my_frame.subplot_imshow_labels(0, 0, primary_labels, title)
            my_frame.subplot_imshow_labels(1,
                                           0,
                                           secondary_labels,
                                           self.secondary_objects_name.value,
                                           sharex=my_frame.subplot(0, 0),
                                           sharey=my_frame.subplot(0, 0))
            my_frame.subplot_imshow_labels(0,
                                           1,
                                           tertiary_labels,
                                           self.subregion_objects_name.value,
                                           sharex=my_frame.subplot(0, 0),
                                           sharey=my_frame.subplot(0, 0))
            my_frame.subplot_imshow_bw(1,
                                       1,
                                       tertiary_outlines,
                                       "Outlines",
                                       sharex=my_frame.subplot(0, 0),
                                       sharey=my_frame.subplot(0, 0))
            my_frame.Refresh()
        #
        # Write out the objects
        #
        workspace.object_set.add_objects(tertiary_objects,
                                         self.subregion_objects_name.value)
        #
        # Write out the measurements
        #
        m = workspace.measurements
        #
        # The parent/child associations
        #
        for parent_objects_name, parents_of, child_count\
         in ((self.primary_objects_name, primary_parents,child_count_of_primary),
             (self.secondary_objects_name, secondary_parents, child_count_of_secondary)):
            m.add_measurement(self.subregion_objects_name.value,
                              cpmi.FF_PARENT % (parent_objects_name.value),
                              parents_of)
            m.add_measurement(
                parent_objects_name.value,
                cpmi.FF_CHILDREN_COUNT % (self.subregion_objects_name.value),
                child_count)
        object_count = np.max(tertiary_labels)
        #
        # The object count
        #
        cpmi.add_object_count_measurements(workspace.measurements,
                                           self.subregion_objects_name.value,
                                           object_count)
        #
        # The object locations
        #
        cpmi.add_object_location_measurements(
            workspace.measurements, self.subregion_objects_name.value,
            tertiary_labels)
        #
        # The outlines
        #
        if self.use_outlines.value:
            out_img = cpi.Image(tertiary_outlines.astype(bool),
                                parent_image=tertiary_image)
            workspace.image_set.add(self.outlines_name.value, out_img)
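
outline() in these examples comes from cellprofiler.cpmath.outline and marks the border pixels of each labeled object. A rough scipy-based stand-in (outline_sketch is a hypothetical name, not the library function):

import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

def outline_sketch(labels):
    # A foreground pixel is on the outline if any neighbor in its 3x3
    # neighborhood carries a different label (including background).
    eroded = grey_erosion(labels, size=(3, 3))
    dilated = grey_dilation(labels, size=(3, 3))
    border = (labels != eroded) | (labels != dilated)
    return np.where(border & (labels > 0), labels, 0)
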
 def run(self, workspace):
     '''Run the module on an image set'''
     
     object_name = self.object_name.value
     remaining_object_name = self.remaining_objects.value
     original_objects = workspace.object_set.get_objects(object_name)
     
     if self.mask_choice == MC_IMAGE:
         mask = workspace.image_set.get_image(self.masking_image.value,
                                              must_be_binary = True)
         mask = mask.pixel_data
     else:
         masking_objects = workspace.object_set.get_objects(
             self.masking_objects.value)
         mask = masking_objects.segmented > 0
     if self.wants_inverted_mask:
         mask = ~mask
     #
     # Load the labels
     #
     labels = original_objects.segmented.copy()
     nobjects = np.max(labels)
     #
     # Resize the mask to cover the objects
     #
     mask, m1 = cpo.size_similarly(labels, mask)
     mask[~m1] = False
     #
     # Apply the mask according to the overlap choice.
     #
     if nobjects == 0:
         pass
     elif self.overlap_choice == P_MASK:
         labels = labels * mask
     else:
         pixel_counts = fix(scind.sum(mask, labels, 
                                      np.arange(1, nobjects+1,dtype=np.int32)))
         if self.overlap_choice == P_KEEP:
             keep = pixel_counts > 0
         else:
             total_pixels = fix(scind.sum(np.ones(labels.shape), labels,
                                          np.arange(1, nobjects+1,dtype=np.int32)))
             if self.overlap_choice == P_REMOVE:
                 keep = pixel_counts == total_pixels
             elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                 fraction = self.overlap_fraction.value
                 keep = pixel_counts / total_pixels >= fraction
             else:
                 raise NotImplementedError(
                     "Unknown overlap-handling choice: %s" %
                     self.overlap_choice.value)
         keep = np.hstack(([False], keep))
         labels[~ keep[labels]] = 0
     #
     # Renumber the labels matrix if requested
     #
     if self.retain_or_renumber == R_RENUMBER:
         unique_labels = np.unique(labels[labels!=0])
         indexer = np.zeros(nobjects+1, int)
         indexer[unique_labels] = np.arange(1, len(unique_labels)+1)
         labels = indexer[labels]
         parent_objects = unique_labels
     else:
         parent_objects = np.arange(1, nobjects+1)
     #
     # Add the objects
     #
     remaining_objects = cpo.Objects()
     remaining_objects.segmented = labels
     remaining_objects.unedited_segmented = original_objects.unedited_segmented
     workspace.object_set.add_objects(remaining_objects, 
                                      remaining_object_name)
     #
     # Add measurements
     #
     m = workspace.measurements
     m.add_measurement(remaining_object_name,
                       I.FF_PARENT % object_name,
                       parent_objects)
     if np.max(original_objects.segmented) == 0:
         child_count = np.array([],int)
     else:
         child_count = fix(scind.sum(labels, original_objects.segmented,
                                     np.arange(1, nobjects+1,dtype=np.int32)))
         child_count = (child_count > 0).astype(int)
     m.add_measurement(object_name,
                       I.FF_CHILDREN_COUNT % remaining_object_name,
                       child_count)
     if self.retain_or_renumber == R_RETAIN:
         remaining_object_count = nobjects
     else:
         remaining_object_count = len(unique_labels)
     I.add_object_count_measurements(m, remaining_object_name,
                                     remaining_object_count)
     I.add_object_location_measurements(m, remaining_object_name, labels)
     #
     # Add an outline if asked to do so
     #
     if self.wants_outlines.value:
         outline_image = cpi.Image(outline(labels) > 0,
                                   parent_image = original_objects.parent_image)
         workspace.image_set.add(self.outlines_name.value, outline_image)
     #
     # Save the input, mask and output images for display
     #
     if self.show_window:
         workspace.display_data.original_labels = original_objects.segmented
         workspace.display_data.final_labels = labels
         workspace.display_data.mask = mask
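
The keep/remove logic above rests on two tricks: per-object pixel counts from scipy.ndimage.sum, and a boolean "keep" vector indexed by the label matrix itself. A compact illustration (the 50% threshold is only an example value, not a module default):

import numpy as np
from scipy.ndimage import sum as nd_sum

labels = np.array([[1, 1, 0],
                   [2, 2, 2]])
mask = np.array([[False, False, False],
                 [True,  True,  True]])
nobjects = labels.max()
in_mask = nd_sum(mask, labels, np.arange(1, nobjects + 1))     # masked pixels per object
totals = nd_sum(np.ones(labels.shape), labels, np.arange(1, nobjects + 1))
keep = in_mask / totals >= 0.5                                 # keep if >= 50% covered
keep = np.hstack(([False], keep))                              # index 0 = background
labels[~keep[labels]] = 0
print(labels)                                                  # object 1 removed, object 2 kept
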
Example #16
    def run(self, workspace):
        dict = self.get_file_names(workspace)
        root = self.get_base_directory(workspace)
        statistics = [("Image name", "File")]
        m = workspace.measurements
        for image_name in dict.keys():
            file_settings = self.get_file_settings(image_name)
            rescale = (file_settings.image_objects_choice == IO_IMAGES
                       and file_settings.rescale)
            provider = LoadImagesImageProvider(image_name, root,
                                               dict[image_name], rescale)
            image = provider.provide_image(workspace.image_set)
            pixel_data = image.pixel_data
            if file_settings.image_objects_choice == IO_IMAGES:
                workspace.image_set.providers.append(provider)
                #
                # Add measurements
                #
                path_name_category = C_PATH_NAME
                file_name_category = C_FILE_NAME
                digest = hashlib.md5()
                digest.update(np.ascontiguousarray(pixel_data).data)
                m.add_image_measurement("_".join((C_MD5_DIGEST, image_name)),
                                        digest.hexdigest())
                m.add_image_measurement("_".join((C_SCALING, image_name)),
                                        image.scale)
                m.add_image_measurement("_".join((C_HEIGHT, image_name)),
                                        int(pixel_data.shape[0]))
                m.add_image_measurement("_".join((C_WIDTH, image_name)),
                                        int(pixel_data.shape[1]))
            else:
                #
                # Turn image into objects
                #
                labels = convert_image_to_objects(pixel_data)
                objects = cpo.Objects()
                objects.segmented = labels
                object_set = workspace.object_set
                assert isinstance(object_set, cpo.ObjectSet)
                object_set.add_objects(objects, image_name)
                #
                # Add measurements
                #
                add_object_count_measurements(m, image_name, objects.count)
                add_object_location_measurements(m, image_name, labels)
                path_name_category = C_OBJECTS_PATH_NAME
                file_name_category = C_OBJECTS_FILE_NAME
                #
                # Add outlines if appropriate
                #
                if file_settings.wants_outlines:
                    outlines = cellprofiler.cpmath.outline.outline(labels)
                    outline_image = cpi.Image(outlines.astype(bool))
                    workspace.image_set.add(file_settings.outlines_name.value,
                                            outline_image)

            m.add_image_measurement(file_name_category + '_' + image_name,
                                    dict[image_name])
            m.add_image_measurement(path_name_category + '_' + image_name,
                                    root)

            statistics += [(image_name, dict[image_name])]
        if workspace.frame:
            title = "Load single image: image cycle # %d" % (
                workspace.measurements.image_set_number + 1)
            figure = workspace.create_or_find_figure(
                title="LoadSingleImage, image cycle #%d" %
                (workspace.measurements.image_set_number),
                subplots=(1, 1))
            figure.subplot_table(0, 0, statistics)
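
convert_image_to_objects() is not shown in these excerpts. A plausible minimal stand-in, assuming integer images already carry label values and anything else is treated as binary foreground (the name and behavior here are assumptions, not the module's actual helper):

import numpy as np
from scipy.ndimage import label

def convert_image_to_objects_sketch(pixel_data):
    if np.issubdtype(pixel_data.dtype, np.integer):
        # Integer images are assumed to already be label matrices.
        return pixel_data.astype(np.int32)
    # Otherwise label connected components of the nonzero pixels.
    labels, _ = label(pixel_data > 0)
    return labels
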
    def run(self, workspace):
        """Run the module on the current data set
        
        workspace - has the current image set, object set, measurements
                    and the parent frame for the application if the module
                    is allowed to display. If the module should not display,
                    workspace.frame is None.
        """
        #
        # The object set holds "objects". Each of these is a container
        # for holding up to three kinds of image labels.
        #
        object_set = workspace.object_set
        #
        # Get the primary objects (the centers to be removed).
        # Get the string value out of primary_object_name.
        #
        primary_objects = object_set.get_objects(
            self.primary_objects_name.value)
        #
        # Get the cleaned-up labels image
        #
        primary_labels = primary_objects.segmented
        #
        # Do the same with the secondary object
        secondary_objects = object_set.get_objects(
            self.secondary_objects_name.value)
        secondary_labels = secondary_objects.segmented
        #
        # If one of the two label images is smaller than the other, we
        # try to find the cropping mask and we apply that mask to the larger
        #
        try:
            if any([
                    p_size < s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                #
                # Look for a cropping mask associated with the primary_labels
                # and apply that mask to resize the secondary labels
                #
                secondary_labels = primary_objects.crop_image_similarly(
                    secondary_labels)
                tertiary_image = primary_objects.parent_image
            elif any([
                    p_size > s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                primary_labels = secondary_objects.crop_image_similarly(
                    primary_labels)
                tertiary_image = secondary_objects.parent_image
            elif secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
        except ValueError:
            # No suitable cropping - resize all to fit the secondary
            # labels which are the most critical.
            #
            primary_labels, _ = cpo.size_similarly(secondary_labels,
                                                   primary_labels)
            if secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
                if tertiary_image is not None:
                    tertiary_image, _ = cpo.size_similarly(
                        secondary_labels, tertiary_image)
        #
        # Find the outlines of the primary image and use this to shrink the
        # primary image by one. This guarantees that there is something left
        # of the secondary image after subtraction
        #
        primary_outline = outline(primary_labels)
        tertiary_labels = secondary_labels.copy()
        if self.shrink_primary:
            primary_mask = np.logical_or(primary_labels == 0, primary_outline)
        else:
            primary_mask = primary_labels == 0
        tertiary_labels[~primary_mask] = 0
        #
        # Get the outlines of the tertiary image
        #
        tertiary_outlines = outline(tertiary_labels) != 0
        #
        # Make the tertiary objects container
        #
        tertiary_objects = cpo.Objects()
        tertiary_objects.segmented = tertiary_labels
        tertiary_objects.parent_image = tertiary_image
        #
        # Relate tertiary objects to their parents & record
        #
        child_count_of_secondary, secondary_parents = \
            secondary_objects.relate_children(tertiary_objects)
        if self.shrink_primary:
            child_count_of_primary, primary_parents = \
                primary_objects.relate_children(tertiary_objects)
        else:
            # Primary and tertiary don't overlap. If tertiary object
            # disappeared, have primary disavow knowledge of it.
            child_count_of_primary = np.zeros(primary_objects.count)
            child_count_of_primary[tertiary_objects.areas > 0] = 1
            primary_parents = np.arange(1, tertiary_objects.count + 1)

        #
        # Write out the objects
        #
        workspace.object_set.add_objects(tertiary_objects,
                                         self.subregion_objects_name.value)
        #
        # Write out the measurements
        #
        m = workspace.measurements
        #
        # The parent/child associations
        #
        for parent_objects_name, parents_of, child_count\
         in ((self.primary_objects_name, primary_parents,child_count_of_primary),
             (self.secondary_objects_name, secondary_parents, child_count_of_secondary)):
            m.add_measurement(self.subregion_objects_name.value,
                              cpmi.FF_PARENT % (parent_objects_name.value),
                              parents_of)
            m.add_measurement(
                parent_objects_name.value,
                cpmi.FF_CHILDREN_COUNT % (self.subregion_objects_name.value),
                child_count)
        object_count = tertiary_objects.count
        #
        # The object count
        #
        cpmi.add_object_count_measurements(workspace.measurements,
                                           self.subregion_objects_name.value,
                                           object_count)
        #
        # The object locations
        #
        cpmi.add_object_location_measurements(
            workspace.measurements, self.subregion_objects_name.value,
            tertiary_labels)
        #
        # The outlines
        #
        if self.use_outlines.value:
            out_img = cpi.Image(tertiary_outlines.astype(bool),
                                parent_image=tertiary_image)
            workspace.image_set.add(self.outlines_name.value, out_img)

        if self.show_window:
            workspace.display_data.primary_labels = primary_labels
            workspace.display_data.secondary_labels = secondary_labels
            workspace.display_data.tertiary_labels = tertiary_labels
            workspace.display_data.tertiary_outlines = tertiary_outlines
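
The tertiary construction above keeps whatever part of each secondary object is not covered by its (optionally one-pixel-shrunk) primary object. A simplified sketch using scipy.ndimage in place of the outline() helper; shrinking by a single global binary erosion is only an approximation when primary objects touch:

import numpy as np
from scipy.ndimage import binary_erosion

def make_tertiary_sketch(primary_labels, secondary_labels, shrink_primary=True):
    if shrink_primary:
        # Shrink each primary region by one pixel so a ring of the
        # secondary object always survives the subtraction.
        removed = primary_labels * binary_erosion(primary_labels > 0)
    else:
        removed = primary_labels
    tertiary = secondary_labels.copy()
    tertiary[removed > 0] = 0
    return tertiary
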
    def run(self, workspace):
        """Run the module on the current data set
        
        workspace - has the current image set, object set, measurements
                    and the parent frame for the application if the module
                    is allowed to display. If the module should not display,
                    workspace.frame is None.
        """
        #
        # The object set holds "objects". Each of these is a container
        # for holding up to three kinds of image labels.
        #
        object_set = workspace.object_set
        #
        # Get the primary objects (the centers to be removed).
        # Get the string value out of primary_object_name.
        #
        primary_objects = object_set.get_objects(self.primary_objects_name.value)
        #
        # Get the cleaned-up labels image
        #
        primary_labels = primary_objects.segmented
        #
        # Do the same with the secondary object
        secondary_objects = object_set.get_objects(self.secondary_objects_name.value)
        secondary_labels = secondary_objects.segmented
        #
        # If one of the two label images is smaller than the other, we
        # try to find the cropping mask and we apply that mask to the larger
        #
        try:
            if any([p_size < s_size 
                    for p_size,s_size
                    in zip(primary_labels.shape, secondary_labels.shape)]):
                #
                # Look for a cropping mask associated with the primary_labels
                # and apply that mask to resize the secondary labels
                #
                secondary_labels = primary_objects.crop_image_similarly(secondary_labels)
                tertiary_image = primary_objects.parent_image
            elif any([p_size > s_size 
                    for p_size,s_size
                    in zip(primary_labels.shape, secondary_labels.shape)]):
                primary_labels = secondary_objects.crop_image_similarly(primary_labels)
                tertiary_image = secondary_objects.parent_image
            elif secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
        except ValueError:
            # No suitable cropping - resize all to fit the secondary
            # labels which are the most critical.
            #
            primary_labels, _ = cpo.size_similarly(secondary_labels, primary_labels)
            if secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
                if tertiary_image is not None:
                    tertiary_image, _ = cpo.size_similarly(secondary_labels, tertiary_image)
        #
        # Find the outlines of the primary image and use this to shrink the
        # primary image by one. This guarantees that there is something left
        # of the secondary image after subtraction
        #
        primary_outline = outline(primary_labels)
        tertiary_labels = secondary_labels.copy()
        if self.shrink_primary:
            primary_mask = np.logical_or(primary_labels == 0,
                                         primary_outline)
        else:
            primary_mask = primary_labels == 0
        tertiary_labels[~primary_mask] = 0
        #
        # Get the outlines of the tertiary image
        #
        tertiary_outlines = outline(tertiary_labels)!=0
        #
        # Make the tertiary objects container
        #
        tertiary_objects = cpo.Objects()
        tertiary_objects.segmented = tertiary_labels
        tertiary_objects.parent_image = tertiary_image
        #
        # Relate tertiary objects to their parents & record
        #
        child_count_of_secondary, secondary_parents = \
            secondary_objects.relate_children(tertiary_objects)
        if self.shrink_primary:
            child_count_of_primary, primary_parents = \
                primary_objects.relate_children(tertiary_objects)
        else:
            # Primary and tertiary don't overlap.
            # Establish overlap between primary and secondary and commute
            _, secondary_of_primary = \
                secondary_objects.relate_children(primary_objects)
            mask = secondary_of_primary != 0
            child_count_of_primary = np.zeros(mask.shape, int)
            child_count_of_primary[mask] = child_count_of_secondary[
                secondary_of_primary[mask] - 1]
            primary_parents = np.zeros(secondary_parents.shape, 
                                       secondary_parents.dtype)
            primary_of_secondary = np.zeros(secondary_objects.count+1, int)
            primary_of_secondary[secondary_of_primary] = \
                np.arange(1, len(secondary_of_primary)+1)
            primary_of_secondary[0] = 0
            primary_parents = primary_of_secondary[secondary_parents]
        #
        # Write out the objects
        #
        workspace.object_set.add_objects(tertiary_objects,
                                         self.subregion_objects_name.value)
        #
        # Write out the measurements
        #
        m = workspace.measurements
        #
        # The parent/child associations
        #
        for parent_objects_name, parents_of, child_count, relationship in (
            (self.primary_objects_name, primary_parents, 
             child_count_of_primary, R_REMOVED),
            (self.secondary_objects_name, secondary_parents, 
             child_count_of_secondary, R_PARENT)):
            m.add_measurement(self.subregion_objects_name.value,
                              cpmi.FF_PARENT%(parent_objects_name.value),
                              parents_of)
            m.add_measurement(parent_objects_name.value,
                              cpmi.FF_CHILDREN_COUNT%(self.subregion_objects_name.value),
                              child_count)
            mask = parents_of != 0
            image_number = np.ones(np.sum(mask), int) * m.image_set_number
            child_object_number = np.argwhere(mask).flatten() + 1
            parent_object_number = parents_of[mask]
            m.add_relate_measurement(
                self.module_num, relationship,
                parent_objects_name.value, self.subregion_objects_name.value,
                image_number, parent_object_number,
                image_number, child_object_number)
            
        object_count = tertiary_objects.count
        #
        # The object count
        #
        cpmi.add_object_count_measurements(workspace.measurements,
                                           self.subregion_objects_name.value,
                                           object_count)
        #
        # The object locations
        #
        cpmi.add_object_location_measurements(workspace.measurements,
                                              self.subregion_objects_name.value,
                                              tertiary_labels)
        #
        # The outlines
        #
        if self.use_outlines.value:
            out_img = cpi.Image(tertiary_outlines.astype(bool),
                                parent_image = tertiary_image)
            workspace.image_set.add(self.outlines_name.value, out_img)

        if self.show_window:
            workspace.display_data.primary_labels = primary_labels
            workspace.display_data.secondary_labels = secondary_labels
            workspace.display_data.tertiary_labels = tertiary_labels
            workspace.display_data.tertiary_outlines = tertiary_outlines
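
add_relate_measurement() expects parallel per-relationship arrays with one entry per child that actually has a parent, using 1-based object numbers. A small worked example of how those arrays are assembled (the numbers are invented):

import numpy as np

parents_of = np.array([0, 2, 2, 5])     # parent label for children 1..4 (0 = orphan)
image_set_number = 7

mask = parents_of != 0
image_number = np.full(int(np.sum(mask)), image_set_number, int)
child_object_number = np.argwhere(mask).flatten() + 1           # children 2, 3, 4
parent_object_number = parents_of[mask]                         # parents  2, 2, 5
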
 def run(self, workspace):
     """Run the module on the current data set
     
     workspace - has the current image set, object set, measurements
                 and the parent frame for the application if the module
                 is allowed to display. If the module should not display,
                 workspace.frame is None.
     """
     #
     # The object set holds "objects". Each of these is a container
     # for holding up to three kinds of image labels.
     #
     object_set = workspace.object_set
     #
     # Get the primary objects (the centers to be removed).
     # Get the string value out of primary_object_name.
     #
     primary_objects = object_set.get_objects(self.primary_objects_name.value)
     #
     # Get the cleaned-up labels image
     #
     primary_labels = primary_objects.segmented
     #
     # Do the same with the secondary object
     secondary_objects = object_set.get_objects(self.secondary_objects_name.value)
     secondary_labels = secondary_objects.segmented
     #
     # If one of the two label images is smaller than the other, we
     # try to find the cropping mask and we apply that mask to the larger
     #
     try:
         if any([p_size < s_size 
                 for p_size,s_size
                 in zip(primary_labels.shape, secondary_labels.shape)]):
             #
             # Look for a cropping mask associated with the primary_labels
             # and apply that mask to resize the secondary labels
             #
             secondary_labels = primary_objects.crop_image_similarly(secondary_labels)
             tertiary_image = primary_objects.parent_image
         elif any([p_size > s_size 
                 for p_size,s_size
                 in zip(primary_labels.shape, secondary_labels.shape)]):
             primary_labels = secondary_objects.crop_image_similarly(primary_labels)
             tertiary_image = secondary_objects.parent_image
         elif secondary_objects.parent_image is not None:
             tertiary_image = secondary_objects.parent_image
         else:
             tertiary_image = primary_objects.parent_image
     except ValueError:
         # No suitable cropping - resize all to fit the secondary
         # labels which are the most critical.
         #
         primary_labels, _ = cpo.size_similarly(secondary_labels, primary_labels)
         if secondary_objects.parent_image is not None:
             tertiary_image = secondary_objects.parent_image
         else:
             tertiary_image = primary_objects.parent_image
             if tertiary_image is not None:
                 tertiary_image, _ = cpo.size_similarly(secondary_labels, tertiary_image)
     #
     # Find the outlines of the primary image and use this to shrink the
     # primary image by one. This guarantees that there is something left
     # of the secondary image after subtraction
     #
     primary_outline = outline(primary_labels)
     tertiary_labels = secondary_labels.copy()
     primary_mask = np.logical_or(primary_labels == 0,
                                  primary_outline)
     tertiary_labels[~primary_mask] = 0
     #
     # Get the outlines of the tertiary image
     #
     tertiary_outlines = outline(tertiary_labels)!=0
     #
     # Make the tertiary objects container
     #
     tertiary_objects = cpo.Objects()
     tertiary_objects.segmented = tertiary_labels
     tertiary_objects.parent_image = tertiary_image
     #
     # Relate tertiary objects to their parents & record
     #
     child_count_of_secondary, secondary_parents = \
         secondary_objects.relate_children(tertiary_objects)
     child_count_of_primary, primary_parents = \
         primary_objects.relate_children(tertiary_objects)
     
     if workspace.frame is not None:
         import cellprofiler.gui.cpfigure as cpf
         #
         # Draw the primary, secondary and tertiary labels
         # and the outlines
         #
         window_name = "CellProfiler:%s:%d"%(self.module_name,self.module_num)
         my_frame=cpf.create_or_find(workspace.frame, 
                                     title="IdentifyTertiaryObjects, image cycle #%d"%(
             workspace.measurements.image_set_number), 
                                     name=window_name, subplots=(2,2))
         
         title = "%s, cycle # %d"%(self.primary_objects_name.value,
                                   workspace.image_set.number+1)
         my_frame.subplot_imshow_labels(0,0,primary_labels,title)
         my_frame.subplot_imshow_labels(1,0,secondary_labels, 
                                        self.secondary_objects_name.value,
                                        sharex = my_frame.subplot(0,0),
                                        sharey = my_frame.subplot(0,0))
         my_frame.subplot_imshow_labels(0, 1,tertiary_labels, 
                                        self.subregion_objects_name.value,
                                        sharex = my_frame.subplot(0,0),
                                        sharey = my_frame.subplot(0,0))
         my_frame.subplot_imshow_bw(1,1,tertiary_outlines, 
                                    "Outlines",
                                    sharex = my_frame.subplot(0,0),
                                    sharey = my_frame.subplot(0,0))
         my_frame.Refresh()
     #
     # Write out the objects
     #
     workspace.object_set.add_objects(tertiary_objects,
                                      self.subregion_objects_name.value)
     #
     # Write out the measurements
     #
     m = workspace.measurements
     #
     # The parent/child associations
     #
     for parent_objects_name, parents_of, child_count\
      in ((self.primary_objects_name, primary_parents,child_count_of_primary),
          (self.secondary_objects_name, secondary_parents, child_count_of_secondary)):
         m.add_measurement(self.subregion_objects_name.value,
                           cpmi.FF_PARENT%(parent_objects_name.value),
                           parents_of)
         m.add_measurement(parent_objects_name.value,
                           cpmi.FF_CHILDREN_COUNT%(self.subregion_objects_name.value),
                           child_count)
     object_count = np.max(tertiary_labels)
     #
     # The object count
     #
     cpmi.add_object_count_measurements(workspace.measurements,
                                        self.subregion_objects_name.value,
                                        object_count)
     #
     # The object locations
     #
     cpmi.add_object_location_measurements(workspace.measurements,
                                           self.subregion_objects_name.value,
                                           tertiary_labels)
     #
     # The outlines
     #
     if self.use_outlines.value:
         out_img = cpi.Image(tertiary_outlines.astype(bool),
                             parent_image = tertiary_image)
         workspace.image_set.add(self.outlines_name.value, out_img)
Example #20
 def run(self, workspace):
     '''Run the module on an image set'''
     
     object_name = self.object_name.value
     remaining_object_name = self.remaining_objects.value
     original_objects = workspace.object_set.get_objects(object_name)
     
     if self.mask_choice == MC_IMAGE:
         mask = workspace.image_set.get_image(self.masking_image.value,
                                              must_be_binary = True)
         mask = mask.pixel_data
     else:
         masking_objects = workspace.object_set.get_objects(
             self.masking_objects.value)
         mask = masking_objects.segmented > 0
     if self.wants_inverted_mask:
         mask = ~mask
     #
     # Load the labels
     #
     labels = original_objects.segmented.copy()
     nobjects = np.max(labels)
     #
     # Resize the mask to cover the objects
     #
     mask, m1 = cpo.size_similarly(labels, mask)
     mask[~m1] = False
     #
     # Apply the mask according to the overlap choice.
     #
     if nobjects == 0:
         pass
     elif self.overlap_choice == P_MASK:
         labels = labels * mask
     else:
         pixel_counts = fix(scind.sum(mask, labels, 
                                      np.arange(1, nobjects+1,dtype=np.int32)))
         if self.overlap_choice == P_KEEP:
             keep = pixel_counts > 0
         else:
             total_pixels = fix(scind.sum(np.ones(labels.shape), labels,
                                          np.arange(1, nobjects+1,dtype=np.int32)))
             if self.overlap_choice == P_REMOVE:
                 keep = pixel_counts == total_pixels
             elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                 fraction = self.overlap_fraction.value
                 keep = pixel_counts / total_pixels >= fraction
             else:
                 raise NotImplementedError(
                     "Unknown overlap-handling choice: %s" %
                     self.overlap_choice.value)
         keep = np.hstack(([False], keep))
         labels[~ keep[labels]] = 0
     #
     # Renumber the labels matrix if requested
     #
     if self.retain_or_renumber == R_RENUMBER:
         unique_labels = np.unique(labels[labels!=0])
         indexer = np.zeros(nobjects+1, int)
         indexer[unique_labels] = np.arange(1, len(unique_labels)+1)
         labels = indexer[labels]
         parent_objects = unique_labels
     else:
         parent_objects = np.arange(1, nobjects+1)
     #
     # Add the objects
     #
     remaining_objects = cpo.Objects()
     remaining_objects.segmented = labels
     remaining_objects.unedited_segmented = original_objects.unedited_segmented
     workspace.object_set.add_objects(remaining_objects, 
                                      remaining_object_name)
     #
     # Add measurements
     #
     m = workspace.measurements
     m.add_measurement(remaining_object_name,
                       I.FF_PARENT % object_name,
                       parent_objects)
     if np.max(original_objects.segmented) == 0:
         child_count = np.array([],int)
     else:
         child_count = fix(scind.sum(labels, original_objects.segmented,
                                     np.arange(1, nobjects+1,dtype=np.int32)))
         child_count = (child_count > 0).astype(int)
     m.add_measurement(object_name,
                       I.FF_CHILDREN_COUNT % remaining_object_name,
                       child_count)
     if self.retain_or_renumber == R_RETAIN:
         remaining_object_count = nobjects
     else:
         remaining_object_count = len(unique_labels)
     I.add_object_count_measurements(m, remaining_object_name,
                                     remaining_object_count)
     I.add_object_location_measurements(m, remaining_object_name, labels)
     #
     # Add an outline if asked to do so
     #
     if self.wants_outlines.value:
         outline_image = cpi.Image(outline(labels) > 0,
                                   parent_image = original_objects.parent_image)
         workspace.image_set.add(self.outlines_name.value, outline_image)
     #
     # Save the input, mask and output images for display
     #
     if self.show_window:
         workspace.display_data.original_labels = original_objects.segmented
         workspace.display_data.final_labels = labels
         workspace.display_data.mask = mask
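
cpo.size_similarly() crops or pads one array to the shape of another and reports which pixels of the result are backed by real data. A minimal stand-in for the 2-D case (size_similarly_sketch is a hypothetical name):

import numpy as np

def size_similarly_sketch(reference, other):
    out = np.zeros(reference.shape, other.dtype)
    valid = np.zeros(reference.shape, bool)
    i = min(reference.shape[0], other.shape[0])
    j = min(reference.shape[1], other.shape[1])
    out[:i, :j] = other[:i, :j]
    valid[:i, :j] = True
    return out, valid
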
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image_name = self.image_name.value
        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale = True)
        workspace.display_data.statistics = []
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        else:
            thresholded_image = self.threshold_image(image_name, workspace)
            has_threshold = True

        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0,:], labels_in[-1,:], labels_in[:,0], labels_in[:,-1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in)+1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]

        labels_in[(~ is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp

        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances,(i,j) = scind.distance_transform_edt(labels_in == 0,
                                                               return_indices = True)
                labels_out = np.zeros(labels_in.shape,int)
                dilate_mask = distances <= self.distance_to_dilate.value
                labels_out[dilate_mask] =\
                    labels_in[i[dilate_mask],j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in,
                                                  thresholded_image,
                                                  1.0)
                labels_out[distances>self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in>0]
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            #
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in,
                                             thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image,
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1-img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img,
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                                objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out,
                                   objects.segmented,
                                   range(np.max(objects.segmented)+1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image = image)
                workspace.image_set.add(self.new_primary_outlines_name.value,
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out)

        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image = image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add measurements
        #
        measurements = workspace.measurements
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT%objname,
                                     children_per_parent)
        measurements.add_measurement(objname,
                                     cpmi.FF_PARENT%self.primary_objects.value,
                                     parents_of_children)
        image_numbers = np.ones(len(parents_of_children), int) *\
            measurements.image_set_number
        mask = parents_of_children > 0
        measurements.add_relate_measurement(
            self.module_num, R_PARENT,
            self.primary_objects.value, self.objects_name.value,
            image_numbers[mask], parents_of_children[mask],
            image_numbers[mask],
            np.arange(1, len(parents_of_children) + 1)[mask])
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(new_objects,
                                             self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(measurements,
                                               self.new_primary_objects_name.value,
                                               np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(measurements,
                                                  self.new_primary_objects_name.value,
                                                  new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value,
                 new_objects, self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value,
                 objects_out, objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(parent_name,
                                             cpmi.FF_CHILDREN_COUNT%child_name,
                                             children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT%parent_name,
                                             parents_of_children)
        if self.show_window:
            object_area = np.sum(segmented_out > 0)
            workspace.display_data.object_pct = \
                100 * object_area / np.prod(segmented_out.shape)
            workspace.display_data.img = img
            workspace.display_data.segmented_out = segmented_out
            workspace.display_data.primary_labels = objects.segmented
            workspace.display_data.global_threshold = global_threshold
            workspace.display_data.object_count = object_count
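
The lookup-table relabeling near the top of this example (the scind.maximum / fix / renumber sequence) is easy to miss. Below is a minimal standalone sketch of the same idea on toy arrays, using plain scipy.ndimage rather than the CellProfiler objects above; the variable names are hypothetical and only illustrate the technique.

import numpy as np
import scipy.ndimage as scind

# Toy primary labels and the secondary labels that survived edge filtering
# (the secondary object for primary 2 was discarded, so its pixels are 0).
primary = np.array([[1, 1, 0, 2, 2],
                    [1, 1, 0, 2, 2],
                    [0, 0, 0, 0, 0],
                    [3, 3, 0, 0, 0]])
secondary_kept = np.array([[1, 1, 0, 0, 0],
                           [1, 1, 0, 0, 0],
                           [1, 0, 0, 0, 0],
                           [3, 3, 3, 0, 0]])

# For each primary label, the largest surviving secondary label over its
# footprint; 0 means that primary's secondary object was removed.
lookup = scind.maximum(secondary_kept, primary,
                       np.arange(np.max(primary) + 1))
lookup = np.nan_to_num(lookup).astype(int)  # empty labels come back as NaN
lookup[0] = 0                               # background always maps to 0
# Renumber the survivors consecutively: 1, 2, ...
lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1

new_primary = lookup[primary]               # primary 2 dropped, 3 becomes 2
new_secondary = lookup[secondary_kept]      # secondary renumbered to match
print(new_primary)
print(new_secondary)
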
    def run(self, workspace):
        """Run the module on the current data set

        workspace - has the current image set, object set, measurements
                    and the parent frame for the application if the module
                    is allowed to display. If the module should not display,
                    workspace.frame is None.
        """
        #
        # The object set holds "objects". Each of these is a container
        # for holding up to three kinds of image labels.
        #
        object_set = workspace.object_set
        #
        # Get the primary objects (the centers to be removed).
        # Get the string value out of primary_object_name.
        #
        primary_objects = object_set.get_objects(
            self.primary_objects_name.value)
        #
        # Get the cleaned-up labels image
        #
        primary_labels = primary_objects.segmented
        #
        # Do the same with the secondary object
        secondary_objects = object_set.get_objects(
            self.secondary_objects_name.value)
        secondary_labels = secondary_objects.segmented
        #
        # If one of the two label images is smaller than the other, we
        # try to find the cropping mask and we apply that mask to the larger
        #
        try:
            if any([
                    p_size < s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                #
                # Look for a cropping mask associated with the primary_labels
                # and apply that mask to resize the secondary labels
                #
                secondary_labels = primary_objects.crop_image_similarly(
                    secondary_labels)
                tertiary_image = primary_objects.parent_image
            elif any([
                    p_size > s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                primary_labels = secondary_objects.crop_image_similarly(
                    primary_labels)
                tertiary_image = secondary_objects.parent_image
            elif secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
        except ValueError:
            # No suitable cropping - resize all to fit the secondary
            # labels which are the most critical.
            #
            primary_labels, _ = cpo.size_similarly(secondary_labels,
                                                   primary_labels)
            if secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
                if tertiary_image is not None:
                    tertiary_image, _ = cpo.size_similarly(
                        secondary_labels, tertiary_image)
        #
        # Find the outlines of the primary image and use this to shrink the
        # primary image by one. This guarantees that there is something left
        # of the secondary image after subtraction
        #
        primary_outline = outline(primary_labels)
        tertiary_labels = secondary_labels.copy()
        if self.shrink_primary:
            primary_mask = np.logical_or(primary_labels == 0, primary_outline)
        else:
            primary_mask = primary_labels == 0
        tertiary_labels[~primary_mask] = 0
        #
        # Get the outlines of the tertiary image
        #
        tertiary_outlines = outline(tertiary_labels) != 0
        #
        # Make the tertiary objects container
        #
        tertiary_objects = cpo.Objects()
        tertiary_objects.segmented = tertiary_labels
        tertiary_objects.parent_image = tertiary_image
        #
        # Relate tertiary objects to their parents & record
        #
        child_count_of_secondary, secondary_parents = \
            secondary_objects.relate_children(tertiary_objects)
        if self.shrink_primary:
            child_count_of_primary, primary_parents = \
                primary_objects.relate_children(tertiary_objects)
        else:
            # Primary and tertiary don't overlap.
            # Establish overlap between primary and secondary and commute
            _, secondary_of_primary = \
                secondary_objects.relate_children(primary_objects)
            mask = secondary_of_primary != 0
            child_count_of_primary = np.zeros(mask.shape, int)
            child_count_of_primary[mask] = child_count_of_secondary[
                secondary_of_primary[mask] - 1]
            primary_parents = np.zeros(secondary_parents.shape,
                                       secondary_parents.dtype)
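            # Invert that mapping: for each secondary label, record the primary
            # it contains, then compose with secondary_parents so every
            # tertiary object inherits a primary parent.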
            primary_of_secondary = np.zeros(secondary_objects.count + 1, int)
            primary_of_secondary[secondary_of_primary] = \
                np.arange(1, len(secondary_of_primary) + 1)
            primary_of_secondary[0] = 0
            primary_parents = primary_of_secondary[secondary_parents]
        #
        # Write out the objects
        #
        workspace.object_set.add_objects(tertiary_objects,
                                         self.subregion_objects_name.value)
        #
        # Write out the measurements
        #
        m = workspace.measurements
        #
        # The parent/child associations
        #
        for parent_objects_name, parents_of, child_count, relationship in (
            (self.primary_objects_name, primary_parents,
             child_count_of_primary, R_REMOVED),
            (self.secondary_objects_name, secondary_parents,
             child_count_of_secondary, R_PARENT)):
            m.add_measurement(
                self.subregion_objects_name.value,
                cellprofiler.measurement.FF_PARENT % parent_objects_name.value,
                parents_of)
            m.add_measurement(
                parent_objects_name.value,
                cellprofiler.measurement.FF_CHILDREN_COUNT %
                self.subregion_objects_name.value, child_count)
            mask = parents_of != 0
            image_number = np.ones(np.sum(mask), int) * m.image_set_number
            child_object_number = np.argwhere(mask).flatten() + 1
            parent_object_number = parents_of[mask]
            m.add_relate_measurement(self.module_num, relationship,
                                     parent_objects_name.value,
                                     self.subregion_objects_name.value,
                                     image_number, parent_object_number,
                                     image_number, child_object_number)

        object_count = tertiary_objects.count
        #
        # The object count
        #
        cpmi.add_object_count_measurements(workspace.measurements,
                                           self.subregion_objects_name.value,
                                           object_count)
        #
        # The object locations
        #
        cpmi.add_object_location_measurements(
            workspace.measurements, self.subregion_objects_name.value,
            tertiary_labels)

        if self.show_window:
            workspace.display_data.primary_labels = primary_labels
            workspace.display_data.secondary_labels = secondary_labels
            workspace.display_data.tertiary_labels = tertiary_labels
            workspace.display_data.tertiary_outlines = tertiary_outlines
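
The core of this example is the handful of lines that shrink each primary object by one pixel and subtract it from the secondary labels. Here is a rough standalone sketch of that construction on toy arrays, with a scipy.ndimage erosion standing in for the outline() helper; the names and arrays are illustrative only.

import numpy as np
import scipy.ndimage as scind

# One primary object sitting inside one secondary object.
primary = np.array([[0, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]])
secondary = np.ones_like(primary)       # a single secondary label covering all

# Shrinking the primary by one pixel keeps its border ring, so the tertiary
# region is never completely emptied by the subtraction.
eroded_core = scind.binary_erosion(primary > 0)

tertiary = secondary.copy()
tertiary[eroded_core] = 0               # remove the shrunken primary footprint
print(tertiary)                         # only the primary's interior is gone
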
    def run(self, workspace):
        """Run the module
        
        workspace    - The workspace contains
            pipeline     - instance of cpp for this run
            image_set    - the images in the image set being processed
            object_set   - the objects (labeled masks) in this image set
            measurements - the measurements for this run
            frame        - the parent frame to whatever frame is created. None means don't draw.
        """
        orig_objects_name = self.object_name.value
        filtered_objects_name = self.filtered_objects.value

        orig_objects = workspace.object_set.get_objects(orig_objects_name)
        assert isinstance(orig_objects, cpo.Objects)
        orig_labels = orig_objects.segmented
        mask = orig_labels != 0

        if workspace.frame is None:
            # Accept the labels as-is
            filtered_labels = orig_labels
        else:
            filtered_labels = self.filter_objects(workspace, orig_labels)
        #
        # Renumber objects consecutively if asked to do so
        #
        unique_labels = np.unique(filtered_labels)
        unique_labels = unique_labels[unique_labels != 0]
        object_count = len(unique_labels)
        if self.renumber_choice == R_RENUMBER:
            mapping = np.zeros(
                1 if len(unique_labels) == 0 else np.max(unique_labels) + 1,
                int)
            mapping[unique_labels] = np.arange(1, object_count + 1)
            filtered_labels = mapping[filtered_labels]
        #
        # Make the objects out of the labels
        #
        filtered_objects = cpo.Objects()
        filtered_objects.segmented = filtered_labels
        filtered_objects.unedited_segmented = orig_objects.unedited_segmented
        filtered_objects.parent_image = orig_objects.parent_image
        workspace.object_set.add_objects(filtered_objects,
                                         filtered_objects_name)
        #
        # Add parent/child & other measurements
        #
        m = workspace.measurements
        child_count, parents = orig_objects.relate_children(filtered_objects)
        m.add_measurement(filtered_objects_name,
                          I.FF_PARENT % (orig_objects_name), parents)
        m.add_measurement(orig_objects_name,
                          I.FF_CHILDREN_COUNT % (filtered_objects_name),
                          child_count)
        #
        # The object count
        #
        I.add_object_count_measurements(m, filtered_objects_name, object_count)
        #
        # The object locations
        #
        I.add_object_location_measurements(m, filtered_objects_name,
                                           filtered_labels)
        #
        # Outlines if we want them
        #
        if self.wants_outlines:
            outlines_name = self.outlines_name.value
            outlines = outline(filtered_labels)
            outlines_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(outlines_name, outlines_image)
        #
        # Do the drawing here
        #
        if workspace.frame is not None:
            figure = workspace.create_or_find_figure(
                title="EditObjectsManually, image cycle #%d" %
                (workspace.measurements.image_set_number),
                subplots=(2, 1))
            figure.subplot_imshow_labels(0, 0, orig_labels, orig_objects_name)
            figure.subplot_imshow_labels(1,
                                         0,
                                         filtered_labels,
                                         filtered_objects_name,
                                         sharex=figure.subplot(0, 0),
                                         sharey=figure.subplot(0, 0))
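
The R_RENUMBER branch above boils down to a small lookup-array trick. A minimal standalone sketch with a toy label image (no CellProfiler objects involved):

import numpy as np

# A label image after some objects have been deleted by hand.
filtered = np.array([[0, 3, 3, 0],
                     [0, 0, 0, 7],
                     [5, 5, 0, 7]])

survivors = np.unique(filtered)
survivors = survivors[survivors != 0]        # labels 3, 5 and 7 remain

# Map each surviving label onto 1..N; everything else stays background.
mapping = np.zeros(1 if survivors.size == 0 else survivors.max() + 1, int)
mapping[survivors] = np.arange(1, survivors.size + 1)

renumbered = mapping[filtered]               # labels become 1, 2 and 3
print(renumbered)
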