Example no. 1
 def test_03_03_random_objects_scale(self):
     np.random.seed(0)
     y,x = np.mgrid[-20:20,-20:20].astype(float)/20
     min = int(20/np.sqrt(2))+1
     max = 40-min
     for points in range(4,12): 
         labels = np.zeros((41,41),int)
         coords = np.random.uniform(low=min,high=max,size=(points,2)).astype(int)
         angles = np.array([np.arctan2(y[yi,xi],x[yi,xi]) for xi,yi in coords])
         order = np.argsort(angles)
         for i in range(points-1):
             draw_line(labels,coords[i],coords[i+1])
         draw_line(labels,coords[i],coords[0])
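         # fill_labeled_holes returns a new label matrix with the polygon interior filled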
         labels = fill_labeled_holes(labels)
         self.score_scales(labels,2)
Example no. 2
 def test_03_03_random_objects_scale(self):
     np.random.seed(0)
     y,x = np.mgrid[-20:20,-20:20].astype(float)/20
     min = int(20/np.sqrt(2))+1
     max = 40-min
     for points in range(4,12): 
         labels = np.zeros((41,41),int)
         coords = np.random.uniform(low=min,high=max,size=(points,2)).astype(int)
         angles = np.array([np.arctan2(y[yi,xi],x[yi,xi]) for xi,yi in coords])
         order = np.argsort(angles)
         for i in range(points-1):
             draw_line(labels,coords[i],coords[i+1])
         draw_line(labels,coords[i],coords[0])
         labels = fill_labeled_holes(labels)
         self.score_scales(labels,2)
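Example no. 3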
 def run(self, workspace):
     labeled_nuclei = workspace.object_set.get_objects(self.primary_objects.value).get_segmented()
     cell_image = workspace.image_set.get_image(self.image_name.value).pixel_data[:,:]
     image_collection = []
     cell_threshold = otsu(cell_image, min_threshold=0, max_threshold=1)
     
     cell_binary = (cell_image >= cell_threshold)
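     # Seeded watershed: grow the nuclei labels outward over the inverted
     # distance transform, constrained to the thresholded cell mask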
     cell_distance = scipym.distance_transform_edt(cell_binary).astype(np.uint16)
     cell_labeled = skm.watershed(-cell_distance, labeled_nuclei, mask=cell_binary)
     
      
     #
     #Fill holes and filter the objects in cell_labeled by size
     #
     cell_labeled = self.filter_on_border(cell_labeled)
     cell_labeled = fill_labeled_holes(cell_labeled)
 
     objects = cellprofiler.objects.Objects()
     objects.segmented = cell_labeled
     objects.parent_image = cell_image
     
     workspace.object_set.add_objects(objects, self.object_name.value)        
     image_collection.append((cell_image, "Original"))
     image_collection.append((cell_labeled, "Labelized image"))
     workspace.display_data.image_collection = image_collection
Example no. 4
 def test_03_02_triangle_scale(self):
     labels = np.zeros((31,31),int)
     draw_line(labels, (15,0), (5,25))
     draw_line(labels, (5,25),(25,25))
     draw_line(labels, (25,25),(15,0))
     labels = fill_labeled_holes(labels)
     labels = labels>0
     self.score_scales(labels, 2)
Example no. 5
 def test_03_02_triangle_scale(self):
     labels = np.zeros((31, 31), int)
     draw_line(labels, (15, 0), (5, 25))
     draw_line(labels, (5, 25), (25, 25))
     draw_line(labels, (25, 25), (15, 0))
     labels = fill_labeled_holes(labels)
     labels = labels > 0
     self.score_scales(labels, 2)
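Example no. 6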
    def run(self, workspace):
 
        image = workspace.image_set.get_image(self.image_name.value)
        nuclei_image = image.pixel_data[:,:]
        image_collection = []
      
        #
        #Get the global threshold with the Otsu algorithm and smooth the nuclei image
        #
#         nuclei_smoothed = self.smooth_image(image_collection[3][0], image.mask, 1)

        global_threshold_nuclei = otsu(nuclei_image, min_threshold=0, max_threshold=1)
        print "the threshold computed by the Otsu algorithm is %f" % global_threshold_nuclei
        
        
        #
        #Binarize the "DAPI" image (nuclei) and label the nuclei
        #

        binary_nuclei = (nuclei_image >= global_threshold_nuclei)
        labeled_nuclei, object_count = scipy.ndimage.label(binary_nuclei, np.ones((3,3), bool))
        print "the image has %d detected objects" % object_count
        
        #
        #Fill holes and delete objects which touch the border.
        #labeled_nuclei is updated by each of these calls
        #Filter small objects and split objects
        #
        labeled_nuclei = fill_labeled_holes(labeled_nuclei)        
        labeled_nuclei = self.filter_on_border(labeled_nuclei)
        labeled_nuclei = self.filter_on_size(labeled_nuclei, object_count)         
        labeled_nuclei = self.split_object(labeled_nuclei)
        
        #
        #Edge detection on the nuclei image so that objects are better separated
        #
        labeled_nuclei_canny = skf.sobel(labeled_nuclei)       
        labeled_nuclei[labeled_nuclei_canny > 0] = 0
        labeled_nuclei = skr.minimum(labeled_nuclei.astype(np.uint16), skm.disk(3))
        
        image_collection.append((nuclei_image, "Original"))
        image_collection.append((labeled_nuclei, "Labelized image"))
        workspace.display_data.image_collection = image_collection

        #
        #Create a new object which will be added to the workspace
        #
        objects = cellprofiler.objects.Objects()
        objects.segmented = labeled_nuclei
        objects.parent_image = nuclei_image
        
        workspace.object_set.add_objects(objects, self.object_name.value)
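Example no. 7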
 def on_lasso(vertices):
     lasso = current_lasso.pop()
     figure.canvas.widgetlock.release(lasso)
     mask = np.zeros(pixel_data.shape[:2], int)
     new_label = np.max(labels) + 1
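     # Rasterize the closed lasso polygon into a scratch mask, fill its
     # interior, and stamp the filled region into labels as a new object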
     for i in range(len(vertices)):
         v0 = (int(vertices[i][1]), int(vertices[i][0]))
         i_next = (i+1) % len(vertices)
         v1 = (int(vertices[i_next][1]), int(vertices[i_next][0]))
         draw_line(mask, v0, v1, new_label)
     mask = fill_labeled_holes(mask)
     labels[mask != 0] = new_label
     draw()
     if labels.max() > 0:
         erase_all_button.Enable()
         erase_last_button.Enable()
Example no. 8
 def on_lasso(vertices):
     lasso = current_lasso.pop()
     figure.canvas.widgetlock.release(lasso)
     mask = np.zeros(pixel_data.shape[:2], int)
     new_label = np.max(labels) + 1
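     # Drop vertices with None coordinates before rasterizing the lasso polygon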
     vertices = [x for x in vertices 
                 if x[0] is not None and x[1] is not None]
     for i in range(len(vertices)):
         v0 = (int(vertices[i][1]), int(vertices[i][0]))
         i_next = (i+1) % len(vertices)
         v1 = (int(vertices[i_next][1]), int(vertices[i_next][0]))
         draw_line(mask, v0, v1, new_label)
     mask = fill_labeled_holes(mask)
     labels[mask != 0] = new_label
     draw()
     if labels.max() > 0:
         erase_all_button.Enable()
         erase_last_button.Enable()
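Example no. 9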
 def do_labels(self, labels):
     '''Run whatever transformation on the given labels matrix'''
     if (self.operation in (O_SHRINK, O_SHRINK_INF) and 
         self.wants_fill_holes.value):
         labels = fill_labeled_holes(labels) 
         
     if self.operation == O_SHRINK_INF:
         return binary_shrink(labels)
     elif self.operation == O_SHRINK:
         return binary_shrink(labels, iterations = self.iterations.value)
     elif self.operation in (O_EXPAND, O_EXPAND_INF):
         if self.operation == O_EXPAND_INF:
             distance = np.max(labels.shape)
         else:
             distance = self.iterations.value
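          # Expand objects by `distance`: each background pixel within range
          # takes the label of its nearest object via the EDT index maps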
         background = labels == 0
         distances, (i,j) = distance_transform_edt(background, 
                                                   return_indices = True)
         out_labels = labels.copy()
         mask = (background & (distances <= distance))
         out_labels[mask] = labels[i[mask],j[mask]]
         return out_labels
     elif self.operation == O_DIVIDE:
         #
         # A pixel must be adjacent to some other label and the object
         # must not disappear.
         #
         adjacent_mask = adjacent(labels)
         thinnable_mask = binary_shrink(labels, 1) != 0
         out_labels = labels.copy()
         out_labels[adjacent_mask & ~ thinnable_mask] = 0
         return out_labels
     elif self.operation == O_SKELETONIZE:
         return skeletonize_labels(labels)
     elif self.operation == O_SPUR:
         return spur(labels, iterations=self.iterations.value)
     else:
         raise NotImplementedError("Unsupported operation: %s" %
                                   self.operation.value)
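Example no. 10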
 def do_labels(self, labels):
     '''Run whatever transformation on the given labels matrix'''
     if (self.operation in (O_SHRINK, O_SHRINK_INF) and 
         self.wants_fill_holes.value):
         labels = fill_labeled_holes(labels) 
         
     if self.operation == O_SHRINK_INF:
         return binary_shrink(labels)
     elif self.operation == O_SHRINK:
         return binary_shrink(labels, iterations = self.iterations.value)
     elif self.operation in (O_EXPAND, O_EXPAND_INF):
         if self.operation == O_EXPAND_INF:
             distance = np.max(labels.shape)
         else:
             distance = self.iterations.value
         background = labels == 0
         distances, (i,j) = distance_transform_edt(background, 
                                                   return_indices = True)
         out_labels = labels.copy()
         mask = (background & (distances <= distance))
         out_labels[mask] = labels[i[mask],j[mask]]
         return out_labels
     elif self.operation == O_DIVIDE:
         #
         # A pixel must be adjacent to some other label and the object
         # must not disappear.
         #
         adjacent_mask = adjacent(labels)
         thinnable_mask = binary_shrink(labels, 1) != 0
         out_labels = labels.copy()
         out_labels[adjacent_mask & ~ thinnable_mask] = 0
         return out_labels
     elif self.operation == O_SKELETONIZE:
         return skeletonize_labels(labels)
     elif self.operation == O_SPUR:
         return spur(labels, iterations=self.iterations.value)
     else:
         raise NotImplementedError("Unsupported operation: %s" %
                                   self.operation.value)
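Example no. 11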
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image = workspace.image_set.get_image(self.image_name.value,
                                              must_be_grayscale=True)
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        elif self.threshold_method == cpthresh.TM_BINARY_IMAGE:
            binary_image = workspace.image_set.get_image(
                self.binary_image.value, must_be_binary=True)
            local_threshold = np.ones(
                img.shape) * np.max(img) + np.finfo(float).eps
            local_threshold[
                binary_image.pixel_data] = np.min(img) - np.finfo(float).eps
            global_threshold = cellprofiler.cpmath.otsu.otsu(
                img[mask], self.threshold_range.min, self.threshold_range.max)
            has_threshold = True
        else:
            local_threshold, global_threshold = self.get_threshold(
                img, mask, None, workspace)
            has_threshold = True

        if has_threshold:
            thresholded_image = img > local_threshold

        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0, :], labels_in[-1, :], labels_in[:,
                                                          0], labels_in[:,
                                                                        -1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in) + 1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]
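        # Suppress labels that were removed during editing, unless they touch
        # the image border, so only surviving or border-touching primaries
        # act as seeds below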

        labels_in[(~is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp

        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances, (i, j) = scind.distance_transform_edt(
                    labels_in == 0, return_indices=True)
                labels_out = np.zeros(labels_in.shape, int)
                dilate_mask = distances <= self.distance_to_dilate.value
                labels_out[dilate_mask] =\
                    labels_in[i[dilate_mask],j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in,
                                                  thresholded_image, 1.0)
                labels_out[distances > self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in > 0]
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            #
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in, thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1 - img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out, objects.segmented,
                                   range(np.max(objects.segmented) + 1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image=image)
                workspace.image_set.add(self.new_primary_outlines_name.value,
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out)
        if workspace.frame != None:
            object_area = np.sum(segmented_out > 0)
            object_pct = 100 * object_area / np.product(segmented_out.shape)

            my_frame = workspace.create_or_find_figure(
                title="IdentifySecondaryObjects, image cycle #%d" %
                (workspace.measurements.image_set_number),
                subplots=(2, 2))
            title = "Input image, cycle #%d" % (workspace.image_set.number + 1)
            my_frame.subplot_imshow_grayscale(0, 0, img, title)
            my_frame.subplot_imshow_labels(1,
                                           0,
                                           segmented_out,
                                           "Labeled image",
                                           sharex=my_frame.subplot(0, 0),
                                           sharey=my_frame.subplot(0, 0))

            outline_img = np.dstack((img, img, img))
            cpmi.draw_outline(outline_img, secondary_outline > 0,
                              cpprefs.get_secondary_outline_color())
            my_frame.subplot_imshow(0,
                                    1,
                                    outline_img,
                                    "Outlined image",
                                    normalize=False,
                                    sharex=my_frame.subplot(0, 0),
                                    sharey=my_frame.subplot(0, 0))

            primary_img = np.dstack((img, img, img))
            cpmi.draw_outline(primary_img, primary_outline > 0,
                              cpprefs.get_primary_outline_color())
            cpmi.draw_outline(primary_img, secondary_outline > 0,
                              cpprefs.get_secondary_outline_color())
            my_frame.subplot_imshow(1,
                                    1,
                                    primary_img,
                                    "Primary and output outlines",
                                    normalize=False,
                                    sharex=my_frame.subplot(0, 0),
                                    sharey=my_frame.subplot(0, 0))
            if global_threshold is not None:
                my_frame.status_bar.SetFields([
                    "Threshold: %.3f" % global_threshold,
                    "Area covered by objects: %.1f %%" % object_pct
                ])
            else:
                my_frame.status_bar.SetFields(
                    ["Area covered by objects: %.1f %%" % object_pct])
        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image=image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add the background measurements if made
        #
        measurements = workspace.measurements
        if has_threshold:
            if isinstance(local_threshold, np.ndarray):
                ave_threshold = np.mean(local_threshold)
            else:
                ave_threshold = local_threshold

            measurements.add_measurement(
                cpmeas.IMAGE, cpmi.FF_FINAL_THRESHOLD % (objname),
                np.array([ave_threshold], dtype=float))
            measurements.add_measurement(
                cpmeas.IMAGE, cpmi.FF_ORIG_THRESHOLD % (objname),
                np.array([global_threshold], dtype=float))
            wv = cpthresh.weighted_variance(img, mask, local_threshold)
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_WEIGHTED_VARIANCE % (objname),
                                         np.array([wv], dtype=float))
            entropies = cpthresh.sum_of_entropies(img, mask, local_threshold)
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_SUM_OF_ENTROPIES % (objname),
                                         np.array([entropies], dtype=float))
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT % objname,
                                     children_per_parent)
        measurements.add_measurement(
            objname, cpmi.FF_PARENT % self.primary_objects.value,
            parents_of_children)
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(
                new_objects, self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(
                measurements, self.new_primary_objects_name.value,
                np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(
                measurements, self.new_primary_objects_name.value,
                new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value, new_objects,
                 self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value, objects_out,
                 objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(
                    parent_name, cpmi.FF_CHILDREN_COUNT % child_name,
                    children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT % parent_name,
                                             parents_of_children)
Example no. 12
    def run_function(self, function_name, pixel_data, mask, count, scale,
                     custom_repeats):
        '''Apply the function once to the image, returning the result'''
        is_binary =  pixel_data.dtype.kind == 'b'
        strel = morph.strel_disk(scale / 2.0)
        if (function_name in (F_BRANCHPOINTS, F_BRIDGE, F_CLEAN, F_DIAG, 
                              F_CONVEX_HULL, F_DISTANCE, F_ENDPOINTS, F_FILL,
                              F_FILL_SMALL, F_HBREAK, F_LIFE, F_MAJORITY, 
                              F_REMOVE, F_SHRINK, F_SKEL, F_SPUR, F_THICKEN, 
                              F_THIN, F_VBREAK) 
            and not is_binary):
            # Apply a very crude threshold to the image for binary algorithms
            logger.warning("Warning: converting image to binary for %s\n"%
                           function_name)
            pixel_data = pixel_data != 0

        if (function_name in (F_BRANCHPOINTS, F_BRIDGE, F_CLEAN, F_DIAG, 
                              F_CONVEX_HULL, F_DISTANCE, F_ENDPOINTS, F_FILL,
                              F_FILL_SMALL,
                              F_HBREAK, F_INVERT, F_LIFE, F_MAJORITY, F_REMOVE,
                              F_SHRINK,
                              F_SKEL, F_SPUR, F_THICKEN, F_THIN, F_VBREAK) or
            (is_binary and
             function_name in (F_CLOSE, F_DILATE, F_ERODE, F_OPEN))):
            # All of these have an iterations argument or it makes no
            # sense to iterate
            if function_name == F_BRANCHPOINTS:
                return morph.branchpoints(pixel_data, mask)
            elif function_name == F_BRIDGE:
                return morph.bridge(pixel_data, mask, count)
            elif function_name == F_CLEAN:
                return morph.clean(pixel_data, mask, count)
            elif function_name == F_CLOSE:
                if mask is None:
                    return scind.binary_closing(pixel_data,
                                                strel,
                                                iterations = count)
                else:
                    return (scind.binary_closing(pixel_data & mask, 
                                                 strel,
                                                 iterations = count) |
                            (pixel_data & ~ mask))
            elif function_name == F_CONVEX_HULL:
                if mask is None:
                    return morph.convex_hull_image(pixel_data)
                else:
                    return morph.convex_hull_image(pixel_data & mask)
            elif function_name == F_DIAG:
                return morph.diag(pixel_data, mask, count)
            elif function_name == F_DILATE:
                return scind.binary_dilation(pixel_data, 
                                             strel,
                                             iterations=count,
                                             mask=mask)
            elif function_name == F_DISTANCE:
                image = scind.distance_transform_edt(pixel_data)
                img_max = np.max(image)
                if img_max > 0:
                    image = image / img_max
                return image
            elif function_name == F_ENDPOINTS:
                return morph.endpoints(pixel_data, mask)
            elif function_name == F_ERODE:
                return scind.binary_erosion(pixel_data, strel,
                                            iterations = count,
                                            mask = mask)
            elif function_name == F_FILL:
                return morph.fill(pixel_data, mask, count)
            elif function_name == F_FILL_SMALL:
                def small_fn(area, foreground):
                    return (not foreground) and (area <= custom_repeats)
                return morph.fill_labeled_holes(pixel_data, mask, small_fn)
            elif function_name == F_HBREAK:
                return morph.hbreak(pixel_data, mask, count)
            elif function_name == F_INVERT:
                if is_binary:
                    if mask is None:
                        return ~ pixel_data
                    result = pixel_data.copy()
                    result[mask] = ~result[mask]
                    return result
                elif mask is None:
                    return 1-pixel_data
                else:
                    result = pixel_data.copy()
                    result[mask]  = 1-result[mask]
                    return result
            elif function_name == F_LIFE:
                return morph.life(pixel_data, count)
            elif function_name == F_MAJORITY:
                return morph.majority(pixel_data, mask, count)
            elif function_name == F_OPEN:
                if mask is None:
                    return scind.binary_opening(pixel_data,
                                                strel,
                                                iterations = count)
                else:
                    return (scind.binary_opening(pixel_data & mask, 
                                                 strel,
                                                 iterations = count) |
                            (pixel_data & ~ mask))
            elif function_name == F_REMOVE:
                return morph.remove(pixel_data, mask, count)
            elif function_name == F_SHRINK:
                return morph.binary_shrink(pixel_data, count)
            elif function_name == F_SKEL:
                return morph.skeletonize(pixel_data, mask)
            elif function_name == F_SPUR:
                return morph.spur(pixel_data, mask, count)
            elif function_name == F_THICKEN:
                return morph.thicken(pixel_data, mask, count)
            elif function_name == F_THIN:
                return morph.thin(pixel_data, mask, count)
            elif function_name == F_VBREAK:
                return morph.vbreak(pixel_data, mask)
            else:
                raise NotImplementedError("Unimplemented morphological function: %s" %
                                          function_name)
        else:
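            # Grey-scale morphology: apply the operation up to `count` times,
            # stopping early once the image no longer changes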
            for i in range(count):
                if function_name == F_BOTHAT:
                    new_pixel_data = morph.black_tophat(pixel_data, mask=mask,
                                                        footprint=strel)
                elif function_name == F_CLOSE:
                                                         
                    new_pixel_data = morph.closing(pixel_data, mask=mask,
                                                   footprint=strel)
                elif function_name == F_DILATE:
                    new_pixel_data = morph.grey_dilation(pixel_data, mask=mask,
                                                         footprint=strel)
                elif function_name == F_ERODE:
                    new_pixel_data = morph.grey_erosion(pixel_data, mask=mask,
                                                        footprint=strel)
                elif function_name == F_OPEN:
                    new_pixel_data = morph.opening(pixel_data, mask=mask,
                                                   footprint=strel)
                elif function_name == F_TOPHAT:
                    new_pixel_data = morph.white_tophat(pixel_data, mask=mask,
                                                        footprint=strel)
                else:
                    raise NotImplementedError("Unimplemented morphological function: %s" %
                                              function_name)
                if np.all(new_pixel_data == pixel_data):
                    break;
                pixel_data = new_pixel_data
            return pixel_data
Example no. 13
 def fun(im, footprint=None):
     return cpmorph.fill_labeled_holes(im, size_fn=small_hole_fn)
Example no. 14
    def run_function(self, function, pixel_data, mask):
        '''Apply the function once to the image, returning the result'''
        count = function.repeat_count
        function_name = function.function.value
        scale = function.scale.value
        custom_repeats = function.custom_repeats.value
        
        is_binary =  pixel_data.dtype.kind == 'b'
        if function.structuring_element == SE_ARBITRARY:
            strel = np.array(function.strel.get_matrix())
        elif function.structuring_element == SE_DISK:
            strel = morph.strel_disk(scale / 2.0)
        elif function.structuring_element == SE_DIAMOND:
            strel = morph.strel_diamond(scale / 2.0)
        elif function.structuring_element == SE_LINE:
            strel = morph.strel_line(scale, function.angle.value)
        elif function.structuring_element == SE_OCTAGON:
            strel = morph.strel_octagon(scale / 2.0)
        elif function.structuring_element == SE_PAIR:
            strel = morph.strel_pair(function.x_offset.value,
                                     function.y_offset.value)
        elif function.structuring_element == SE_PERIODIC_LINE:
            xoff = function.x_offset.value
            yoff = function.y_offset.value
            n = max(scale / 2.0 / np.sqrt(float(xoff*xoff+yoff*yoff)), 1)
            strel = morph.strel_periodicline(
                xoff, yoff, n)
        elif function.structuring_element == SE_RECTANGLE:
            strel = morph.strel_rectangle(
                function.width.value, function.height.value)
        else:
            strel = morph.strel_square(scale)
        
        if (function_name in (F_BRANCHPOINTS, F_BRIDGE, F_CLEAN, F_DIAG, 
                              F_CONVEX_HULL, F_DISTANCE, F_ENDPOINTS, F_FILL,
                              F_FILL_SMALL, F_HBREAK, F_LIFE, F_MAJORITY, 
                              F_REMOVE, F_SHRINK, F_SKEL, F_SKELPE, F_SPUR, 
                              F_THICKEN, F_THIN, F_VBREAK) 
            and not is_binary):
            # Apply a very crude threshold to the image for binary algorithms
            logger.warning("Warning: converting image to binary for %s\n"%
                           function_name)
            pixel_data = pixel_data != 0

        if (function_name in (F_BRANCHPOINTS, F_BRIDGE, F_CLEAN, F_DIAG, 
                              F_CONVEX_HULL, F_DISTANCE, F_ENDPOINTS, F_FILL,
                              F_FILL_SMALL,
                              F_HBREAK, F_INVERT, F_LIFE, F_MAJORITY, F_REMOVE,
                              F_SHRINK,
                              F_SKEL, F_SKELPE, F_SPUR, F_THICKEN, F_THIN, 
                              F_VBREAK) or
            (is_binary and
             function_name in (F_CLOSE, F_DILATE, F_ERODE, F_OPEN))):
            # All of these have an iterations argument or it makes no
            # sense to iterate
            if function_name == F_BRANCHPOINTS:
                return morph.branchpoints(pixel_data, mask)
            elif function_name == F_BRIDGE:
                return morph.bridge(pixel_data, mask, count)
            elif function_name == F_CLEAN:
                return morph.clean(pixel_data, mask, count)
            elif function_name == F_CLOSE:
                if mask is None:
                    return scind.binary_closing(pixel_data,
                                                strel,
                                                iterations = count)
                else:
                    return (scind.binary_closing(pixel_data & mask, 
                                                 strel,
                                                 iterations = count) |
                            (pixel_data & ~ mask))
            elif function_name == F_CONVEX_HULL:
                if mask is None:
                    return morph.convex_hull_image(pixel_data)
                else:
                    return morph.convex_hull_image(pixel_data & mask)
            elif function_name == F_DIAG:
                return morph.diag(pixel_data, mask, count)
            elif function_name == F_DILATE:
                return scind.binary_dilation(pixel_data, 
                                             strel,
                                             iterations=count,
                                             mask=mask)
            elif function_name == F_DISTANCE:
                image = scind.distance_transform_edt(pixel_data)
                if function.rescale_values.value:
                    image = image / np.max(image)
                return image
            elif function_name == F_ENDPOINTS:
                return morph.endpoints(pixel_data, mask)
            elif function_name == F_ERODE:
                return scind.binary_erosion(pixel_data, strel,
                                            iterations = count,
                                            mask = mask)
            elif function_name == F_FILL:
                return morph.fill(pixel_data, mask, count)
            elif function_name == F_FILL_SMALL:
                def small_fn(area, foreground):
                    return (not foreground) and (area <= custom_repeats)
                return morph.fill_labeled_holes(pixel_data, mask, small_fn)
            elif function_name == F_HBREAK:
                return morph.hbreak(pixel_data, mask, count)
            elif function_name == F_INVERT:
                if is_binary:
                    if mask is None:
                        return ~ pixel_data
                    result = pixel_data.copy()
                    result[mask] = ~result[mask]
                    return result
                elif mask is None:
                    return 1-pixel_data
                else:
                    result = pixel_data.copy()
                    result[mask]  = 1-result[mask]
                    return result
            elif function_name == F_LIFE:
                return morph.life(pixel_data, count)
            elif function_name == F_MAJORITY:
                return morph.majority(pixel_data, mask, count)
            elif function_name == F_OPEN:
                if mask is None:
                    return scind.binary_opening(pixel_data,
                                                strel,
                                                iterations = count)
                else:
                    return (scind.binary_opening(pixel_data & mask, 
                                                 strel,
                                                 iterations = count) |
                            (pixel_data & ~ mask))
            elif function_name == F_REMOVE:
                return morph.remove(pixel_data, mask, count)
            elif function_name == F_SHRINK:
                return morph.binary_shrink(pixel_data, count)
            elif function_name == F_SKEL:
                return morph.skeletonize(pixel_data, mask)
            elif function_name == F_SKELPE:
                return morph.skeletonize(
                    pixel_data, mask,
                    scind.distance_transform_edt(pixel_data) *
                    poisson_equation(pixel_data))
            elif function_name == F_SPUR:
                return morph.spur(pixel_data, mask, count)
            elif function_name == F_THICKEN:
                return morph.thicken(pixel_data, mask, count)
            elif function_name == F_THIN:
                return morph.thin(pixel_data, mask, count)
            elif function_name == F_VBREAK:
                return morph.vbreak(pixel_data, mask)
            else:
                raise NotImplementedError("Unimplemented morphological function: %s" %
                                          function_name)
        else:
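            # Grey-scale morphology: apply the operation up to `count` times,
            # stopping early once the image no longer changes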
            for i in range(count):
                if function_name == F_BOTHAT:
                    new_pixel_data = morph.black_tophat(pixel_data, mask=mask,
                                                        footprint=strel)
                elif function_name == F_CLOSE:
                                                         
                    new_pixel_data = morph.closing(pixel_data, mask=mask,
                                                   footprint=strel)
                elif function_name == F_DILATE:
                    new_pixel_data = morph.grey_dilation(pixel_data, mask=mask,
                                                         footprint=strel)
                elif function_name == F_ERODE:
                    new_pixel_data = morph.grey_erosion(pixel_data, mask=mask,
                                                        footprint=strel)
                elif function_name == F_OPEN:
                    new_pixel_data = morph.opening(pixel_data, mask=mask,
                                                   footprint=strel)
                elif function_name == F_TOPHAT:
                    new_pixel_data = morph.white_tophat(pixel_data, mask=mask,
                                                        footprint=strel)
                else:
                    raise NotImplementedError("Unimplemented morphological function: %s" %
                                              function_name)
                if np.all(new_pixel_data == pixel_data):
                    break;
                pixel_data = new_pixel_data
            return pixel_data
Example no. 15
 def run(self, workspace):
     '''Run the module on the image set'''
     seed_objects_name = self.seed_objects_name.value
     skeleton_name = self.image_name.value
     seed_objects = workspace.object_set.get_objects(seed_objects_name)
     labels = seed_objects.segmented
     labels_count = np.max(labels)
     label_range = np.arange(labels_count,dtype=np.int32)+1
     
     skeleton_image = workspace.image_set.get_image(
         skeleton_name, must_be_binary = True)
     skeleton = skeleton_image.pixel_data
     if skeleton_image.has_mask:
         skeleton = skeleton & skeleton_image.mask
     try:
         labels = skeleton_image.crop_image_similarly(labels)
     except:
         labels, m1 = cpo.size_similarly(skeleton, labels)
         labels[~m1] = 0
     #
     # The following code makes a ring around the seed objects with
     # the skeleton trunks sticking out of it.
     #
     # Create a new skeleton with holes at the seed objects
     # First combine the seed objects with the skeleton so
     # that the skeleton trunks come out of the seed objects.
     #
     # Erode the labels once so that all of the trunk branchpoints
     # will be within the labels
     #
     #
     # Dilate the objects, then subtract them to make a ring
     #
     my_disk = morph.strel_disk(1.5).astype(int)
     dilated_labels = grey_dilation(labels, footprint=my_disk)
     seed_mask = dilated_labels > 0
     combined_skel = skeleton | seed_mask
     
     closed_labels = grey_erosion(dilated_labels,
                                  footprint = my_disk)
     seed_center = closed_labels > 0
     combined_skel = combined_skel & (~seed_center)
     #
     # Fill in single holes (but not a one-pixel hole made by
     # a one-pixel image)
     #
     if self.wants_to_fill_holes:
         def size_fn(area, is_object):
             return (~ is_object) and (area <= self.maximum_hole_size.value)
         combined_skel = morph.fill_labeled_holes(
             combined_skel, ~seed_center, size_fn)
     #
     # Reskeletonize to make true branchpoints at the ring boundaries
     #
     combined_skel = morph.skeletonize(combined_skel)
     #
     # The skeleton outside of the labels
     #
     outside_skel = combined_skel & (dilated_labels == 0)
     #
     # Associate all skeleton points with seed objects
     #
     dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                 dilated_labels,
                                                 combined_skel, 1)
     #
     # Get rid of any branchpoints not connected to seeds
     #
     combined_skel[dlabels == 0] = False
     #
     # Find the branchpoints
     #
     branch_points = morph.branchpoints(combined_skel)
     #
     # Odd case: when four branches meet like this, branchpoints are not
     # assigned because they are arbitrary. So assign them.
     #
     # .  .
     #  B.
     #  .B
     # .  .
     #
     odd_case = (combined_skel[:-1,:-1] & combined_skel[1:,:-1] &
                 combined_skel[:-1,1:] & combined_skel[1:,1:])
     branch_points[:-1,:-1][odd_case] = True
     branch_points[1:,1:][odd_case] = True
     #
     # Find the branching counts for the trunks (# of extra branches
     # emanating from a point other than the line it might be on).
     #
     branching_counts = morph.branchings(combined_skel)
     branching_counts = np.array([0,0,0,1,2])[branching_counts]
     #
     # Only take branches within 1 of the outside skeleton
     #
     dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
     branching_counts[~dilated_skel] = 0
     #
     # Find the endpoints
     #
     end_points = morph.endpoints(combined_skel)
     #
     # We use two ranges for classification here:
     # * anything within one pixel of the dilated image is a trunk
     # * anything outside of that range is a branch
     #
     nearby_labels = dlabels.copy()
     nearby_labels[distance_map > 1.5] = 0
     
     outside_labels = dlabels.copy()
     outside_labels[nearby_labels > 0] = 0
     #
     # The trunks are the branchpoints that lie within one pixel of
     # the dilated image.
     #
     if labels_count > 0:
         trunk_counts = fix(scind.sum(branching_counts, nearby_labels, 
                                      label_range)).astype(int)
     else:
         trunk_counts = np.zeros((0,),int)
     #
     # The branches are the branchpoints that lie outside the seed objects
     #
     if labels_count > 0:
         branch_counts = fix(scind.sum(branch_points, outside_labels, 
                                       label_range))
     else:
         branch_counts = np.zeros((0,),int)
     #
     # Save the endpoints
     #
     if labels_count > 0:
         end_counts = fix(scind.sum(end_points, outside_labels, label_range))
     else:
         end_counts = np.zeros((0,), int)
     #
     # Save measurements
     #
     m = workspace.measurements
     assert isinstance(m, cpmeas.Measurements)
     feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
     m.add_measurement(seed_objects_name, feature, trunk_counts)
     feature = "_".join((C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES, 
                         skeleton_name))
     m.add_measurement(seed_objects_name, feature, branch_counts)
     feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
     m.add_measurement(seed_objects_name, feature, end_counts)
     #
     # Collect the graph information
     #
     if self.wants_neuron_graph:
         trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
         intensity_image = workspace.image_set.get_image(
             self.intensity_image_name.value)
         edge_graph, vertex_graph = self.make_neuron_graph(
             combined_skel, dlabels, 
             trunk_mask,
             branch_points & ~trunk_mask,
             end_points,
             intensity_image.pixel_data)
         #
         # Add an image number column to both and change vertex index
         # to vertex number (one-based)
         #
         image_number = workspace.measurements.image_set_number
         vertex_graph = np.rec.fromarrays(
             (np.ones(len(vertex_graph)) * image_number,
              np.arange(1, len(vertex_graph) + 1),
              vertex_graph['i'],
              vertex_graph['j'],
              vertex_graph['labels'],
              vertex_graph['kind']),
             names = ("image_number", "vertex_number", "i", "j",
                      "labels", "kind"))
         
         edge_graph = np.rec.fromarrays(
             (np.ones(len(edge_graph)) * image_number,
              edge_graph["v1"],
              edge_graph["v2"],
              edge_graph["length"],
              edge_graph["total_intensity"]),
             names = ("image_number", "v1", "v2", "length", 
                      "total_intensity"))
         
         path = self.directory.get_absolute_path(m)
         edge_file = m.apply_metadata(self.edge_file_name.value)
         edge_path = os.path.abspath(os.path.join(path, edge_file))
         vertex_file = m.apply_metadata(self.vertex_file_name.value)
         vertex_path = os.path.abspath(os.path.join(path, vertex_file))
         d = self.get_dictionary(workspace.image_set_list)
         for file_path, table, fmt in (
             (edge_path, edge_graph, "%d,%d,%d,%d,%.4f"),
             (vertex_path, vertex_graph, "%d,%d,%d,%d,%d,%s")):
             #
             # Delete files first time through / otherwise append
             #
             if not d.has_key(file_path):
                 d[file_path] = True
                 if os.path.exists(file_path):
                     if workspace.frame is not None:
                         import wx
                         if wx.MessageBox(
                             "%s already exists. Do you want to overwrite it?" %
                             file_path, "Warning: overwriting file",
                             style = wx.YES_NO, 
                             parent = workspace.frame) != wx.YES:
                             raise ValueError("Can't overwrite %s" % file_path)
                     os.remove(file_path)
                 fd = open(file_path, 'wt')
                 header = ','.join(table.dtype.names)
                 fd.write(header + '\n')
             else:
                 fd = open(file_path, 'at')
             np.savetxt(fd, table, fmt)
             fd.close()
             if workspace.frame is not None:
                 workspace.display_data.edge_graph = edge_graph
                 workspace.display_data.vertex_graph = vertex_graph
     #
     # Make the display image
     #
     if workspace.frame is not None or self.wants_branchpoint_image:
         branchpoint_image = np.zeros((skeleton.shape[0],
                                       skeleton.shape[1],
                                       3))
         trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
         branch_mask = branch_points & (outside_labels != 0)
         end_mask = end_points & (outside_labels != 0)
         branchpoint_image[outside_skel,:] = 1
         branchpoint_image[trunk_mask | branch_mask | end_mask,:] = 0
         branchpoint_image[trunk_mask,0] = 1
         branchpoint_image[branch_mask,1] = 1
         branchpoint_image[end_mask, 2] = 1
         branchpoint_image[dilated_labels != 0,:] *= .875
         branchpoint_image[dilated_labels != 0,:] += .1
         if workspace.frame:
             workspace.display_data.branchpoint_image = branchpoint_image
         if self.wants_branchpoint_image:
             bi = cpi.Image(branchpoint_image,
                            parent_image = skeleton_image)
             workspace.image_set.add(self.branchpoint_image_name.value, bi)
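Example no. 16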
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image = workspace.image_set.get_image(self.image_name.value,
                                              must_be_grayscale = True)
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        elif self.threshold_method == cpthresh.TM_BINARY_IMAGE:
            binary_image = workspace.image_set.get_image(self.binary_image.value,
                                                         must_be_binary = True)
            local_threshold = np.ones(img.shape) * np.max(img) + np.finfo(float).eps
            local_threshold[binary_image.pixel_data] = np.min(img) - np.finfo(float).eps
            global_threshold = cellprofiler.cpmath.otsu.otsu(img[mask],
                        self.threshold_range.min,
                        self.threshold_range.max)
            has_threshold = True
        else:
            local_threshold,global_threshold = self.get_threshold(img, mask, None, workspace)
            has_threshold = True
        
        if has_threshold:
            thresholded_image = img > local_threshold
        
        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0,:], labels_in[-1,:], labels_in[:,0], labels_in[:,-1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in)+1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]
        
        labels_in[(~ is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp
        
        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances,(i,j) = scind.distance_transform_edt(labels_in == 0, 
                                                               return_indices = True)
                labels_out = np.zeros(labels_in.shape,int)
                dilate_mask = distances <= self.distance_to_dilate.value 
                labels_out[dilate_mask] =\
                    labels_in[i[dilate_mask],j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in, 
                                                  thresholded_image,
                                                  1.0)
                labels_out[distances>self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in>0] 
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            # 
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in, 
                                             thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image, 
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1-img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img, 
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                                objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out,
                                   objects.segmented,
                                   range(np.max(objects.segmented)+1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image = image)
                workspace.image_set.add(self.new_primary_outlines_name.value, 
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out) 
        if workspace.frame is not None:
            object_area = np.sum(segmented_out > 0)
            object_pct = 100 * object_area / np.product(segmented_out.shape)
                
            my_frame=workspace.create_or_find_figure(title="IdentifySecondaryObjects, image cycle #%d"%(
                workspace.measurements.image_set_number),subplots=(2,2))
            title = "Input image, cycle #%d"%(workspace.image_set.number+1)
            my_frame.subplot_imshow_grayscale(0, 0, img, title)
            my_frame.subplot_imshow_labels(1, 0, segmented_out, "Labeled image",
                                           sharex = my_frame.subplot(0,0),
                                           sharey = my_frame.subplot(0,0))

            outline_img = np.dstack((img, img, img))
            cpmi.draw_outline(outline_img, secondary_outline > 0,
                              cpprefs.get_secondary_outline_color())
            my_frame.subplot_imshow(0, 1, outline_img, "Outlined image",
                                    normalize=False,
                                    sharex = my_frame.subplot(0,0),
                                    sharey = my_frame.subplot(0,0))
            
            primary_img = np.dstack((img, img, img))
            cpmi.draw_outline(primary_img, primary_outline > 0,
                              cpprefs.get_primary_outline_color())
            cpmi.draw_outline(primary_img, secondary_outline > 0,
                              cpprefs.get_secondary_outline_color())
            my_frame.subplot_imshow(1, 1, primary_img,
                                    "Primary and output outlines",
                                    normalize=False,
                                    sharex = my_frame.subplot(0,0),
                                    sharey = my_frame.subplot(0,0))
            if global_threshold is not None:
                my_frame.status_bar.SetFields(
                    ["Threshold: %.3f" % global_threshold,
                     "Area covered by objects: %.1f %%" % object_pct])
            else:
                my_frame.status_bar.SetFields(
                    ["Area covered by objects: %.1f %%" % object_pct])
        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image = image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add the background measurements if made
        #
        measurements = workspace.measurements
        if has_threshold:
            if isinstance(local_threshold,np.ndarray):
                ave_threshold = np.mean(local_threshold)
            else:
                ave_threshold = local_threshold
            
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_FINAL_THRESHOLD%(objname),
                                         np.array([ave_threshold],
                                                     dtype=float))
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_ORIG_THRESHOLD%(objname),
                                         np.array([global_threshold],
                                                      dtype=float))
            wv = cpthresh.weighted_variance(img, mask, local_threshold)
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_WEIGHTED_VARIANCE%(objname),
                                         np.array([wv],dtype=float))
            entropies = cpthresh.sum_of_entropies(img, mask, local_threshold)
            measurements.add_measurement(cpmeas.IMAGE,
                                         cpmi.FF_SUM_OF_ENTROPIES%(objname),
                                         np.array([entropies],dtype=float))
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT%objname,
                                     children_per_parent)
        measurements.add_measurement(objname,
                                     cpmi.FF_PARENT%self.primary_objects.value,
                                     parents_of_children)
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(new_objects,
                                             self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(measurements,
                                               self.new_primary_objects_name.value,
                                               np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(measurements,
                                                  self.new_primary_objects_name.value,
                                                  new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value,
                 new_objects, self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value,
                 objects_out, objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(parent_name,
                                             cpmi.FF_CHILDREN_COUNT%child_name,
                                             children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT%parent_name,
                                             parents_of_children)
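The M_DISTANCE_N branch above grows each primary object outward by a fixed number of pixels, assigning every background pixel within range to its nearest seed. Below is a minimal standalone sketch of that idea using only numpy and scipy.ndimage; the function and array names are illustrative, not the module's own.

import numpy as np
from scipy import ndimage as scind

def expand_labels_by_distance(labels, max_distance):
    # Distance from each background pixel to the closest labeled pixel,
    # plus the coordinates of that closest pixel.
    distances, (i, j) = scind.distance_transform_edt(
        labels == 0, return_indices=True)
    out = np.zeros_like(labels)
    within = distances <= max_distance
    # Copy the nearest seed's label onto every in-range background pixel;
    # seed pixels keep their own label because their distance is zero.
    out[within] = labels[i[within], j[within]]
    return out

seeds = np.zeros((9, 9), int)
seeds[2, 2] = 1
seeds[6, 6] = 2
expanded = expand_labels_by_distance(seeds, 3)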
Example No. 17
0
    def run_function(self, function, pixel_data, mask):
        '''Apply the function once to the image, returning the result'''
        count = function.repeat_count
        function_name = function.function.value
        scale = function.scale.value
        custom_repeats = function.custom_repeats.value
        
        is_binary =  pixel_data.dtype.kind == 'b'
        if function.structuring_element == SE_ARBITRARY:
            strel = np.array(function.strel.get_matrix())
        elif function.structuring_element == SE_DISK:
            strel = morph.strel_disk(scale / 2.0)
        elif function.structuring_element == SE_DIAMOND:
            strel = morph.strel_diamond(scale / 2.0)
        elif function.structuring_element == SE_LINE:
            strel = morph.strel_line(scale, function.angle.value)
        elif function.structuring_element == SE_OCTAGON:
            strel = morph.strel_octagon(scale / 2.0)
        elif function.structuring_element == SE_PAIR:
            strel = morph.strel_pair(function.x_offset.value,
                                     function.y_offset.value)
        elif function.structuring_element == SE_PERIODIC_LINE:
            xoff = function.x_offset.value
            yoff = function.y_offset.value
            n = max(scale / 2.0 / np.sqrt(float(xoff*xoff+yoff*yoff)), 1)
            strel = morph.strel_periodicline(
                xoff, yoff, n)
        elif function.structuring_element == SE_RECTANGLE:
            strel = morph.strel_rectangle(
                function.width.value, function.height.value)
        else:
            strel = morph.strel_square(scale)
        
        if (function_name in (F_BRANCHPOINTS, F_BRIDGE, F_CLEAN, F_DIAG, 
                              F_CONVEX_HULL, F_DISTANCE, F_ENDPOINTS, F_FILL,
                              F_FILL_SMALL, F_HBREAK, F_LIFE, F_MAJORITY, 
                              F_REMOVE, F_SHRINK, F_SKEL, F_SKELPE, F_SPUR, 
                              F_THICKEN, F_THIN, F_VBREAK) 
            and not is_binary):
            # Apply a very crude threshold to the image for binary algorithms
            logger.warning("Warning: converting image to binary for %s\n"%
                           function_name)
            pixel_data = pixel_data != 0

        if (function_name in (F_BRANCHPOINTS, F_BRIDGE, F_CLEAN, F_DIAG, 
                              F_CONVEX_HULL, F_DISTANCE, F_ENDPOINTS, F_FILL,
                              F_FILL_SMALL,
                              F_HBREAK, F_INVERT, F_LIFE, F_MAJORITY, F_REMOVE,
                              F_SHRINK,
                              F_SKEL, F_SKELPE, F_SPUR, F_THICKEN, F_THIN, 
                              F_VBREAK) or
            (is_binary and
             function_name in (F_CLOSE, F_DILATE, F_ERODE, F_OPEN))):
            # All of these have an iterations argument or it makes no
            # sense to iterate
            if function_name == F_BRANCHPOINTS:
                return morph.branchpoints(pixel_data, mask)
            elif function_name == F_BRIDGE:
                return morph.bridge(pixel_data, mask, count)
            elif function_name == F_CLEAN:
                return morph.clean(pixel_data, mask, count)
            elif function_name == F_CLOSE:
                if mask is None:
                    return scind.binary_closing(pixel_data,
                                                strel,
                                                iterations = count)
                else:
                    return (scind.binary_closing(pixel_data & mask, 
                                                 strel,
                                                 iterations = count) |
                            (pixel_data & ~ mask))
            elif function_name == F_CONVEX_HULL:
                if mask is None:
                    return morph.convex_hull_image(pixel_data)
                else:
                    return morph.convex_hull_image(pixel_data & mask)
            elif function_name == F_DIAG:
                return morph.diag(pixel_data, mask, count)
            elif function_name == F_DILATE:
                return scind.binary_dilation(pixel_data, 
                                             strel,
                                             iterations=count,
                                             mask=mask)
            elif function_name == F_DISTANCE:
                image = scind.distance_transform_edt(pixel_data)
                if function.rescale_values.value:
                    img_max = np.max(image)
                    if img_max > 0:
                        image = image / img_max
                return image
            elif function_name == F_ENDPOINTS:
                return morph.endpoints(pixel_data, mask)
            elif function_name == F_ERODE:
                return scind.binary_erosion(pixel_data, strel,
                                            iterations = count,
                                            mask = mask)
            elif function_name == F_FILL:
                return morph.fill(pixel_data, mask, count)
            elif function_name == F_FILL_SMALL:
                def small_fn(area, foreground):
                    return (not foreground) and (area <= custom_repeats)
                return morph.fill_labeled_holes(pixel_data, mask, small_fn)
            elif function_name == F_HBREAK:
                return morph.hbreak(pixel_data, mask, count)
            elif function_name == F_INVERT:
                if is_binary:
                    if mask is None:
                        return ~ pixel_data
                    result = pixel_data.copy()
                    result[mask] = ~result[mask]
                    return result
                elif mask is None:
                    return 1-pixel_data
                else:
                    result = pixel_data.copy()
                    result[mask]  = 1-result[mask]
                    return result
            elif function_name == F_LIFE:
                return morph.life(pixel_data, count)
            elif function_name == F_MAJORITY:
                return morph.majority(pixel_data, mask, count)
            elif function_name == F_OPEN:
                if mask is None:
                    return scind.binary_opening(pixel_data,
                                                strel,
                                                iterations = count)
                else:
                    return (scind.binary_opening(pixel_data & mask, 
                                                 strel,
                                                 iterations = count) |
                            (pixel_data & ~ mask))
            elif function_name == F_REMOVE:
                return morph.remove(pixel_data, mask, count)
            elif function_name == F_SHRINK:
                return morph.binary_shrink(pixel_data, count)
            elif function_name == F_SKEL:
                return morph.skeletonize(pixel_data, mask)
            elif function_name == F_SKELPE:
                return morph.skeletonize(
                    pixel_data, mask,
                    scind.distance_transform_edt(pixel_data) *
                    poisson_equation(pixel_data))
            elif function_name == F_SPUR:
                return morph.spur(pixel_data, mask, count)
            elif function_name == F_THICKEN:
                return morph.thicken(pixel_data, mask, count)
            elif function_name == F_THIN:
                return morph.thin(pixel_data, mask, count)
            elif function_name == F_VBREAK:
                return morph.vbreak(pixel_data, mask)
            else:
                raise NotImplementedError("Unimplemented morphological function: %s" %
                                          function_name)
        else:
            for i in range(count):
                if function_name == F_BOTHAT:
                    new_pixel_data = morph.black_tophat(pixel_data, mask=mask,
                                                        footprint=strel)
                elif function_name == F_CLOSE:
                    new_pixel_data = morph.closing(pixel_data, mask=mask,
                                                   footprint=strel)
                elif function_name == F_DILATE:
                    new_pixel_data = morph.grey_dilation(pixel_data, mask=mask,
                                                         footprint=strel)
                elif function_name == F_ERODE:
                    new_pixel_data = morph.grey_erosion(pixel_data, mask=mask,
                                                        footprint=strel)
                elif function_name == F_OPEN:
                    new_pixel_data = morph.opening(pixel_data, mask=mask,
                                                   footprint=strel)
                elif function_name == F_TOPHAT:
                    new_pixel_data = morph.white_tophat(pixel_data, mask=mask,
                                                        footprint=strel)
                else:
                    raise NotImplementedError("Unimplemented morphological function: %s" %
                                              function_name)
                if np.all(new_pixel_data == pixel_data):
                    break
                pixel_data = new_pixel_data
            return pixel_data
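For the grayscale operations in the final branch, the loop above repeats the chosen filter up to `count` times but stops early once the image no longer changes. A small sketch of that repeat-until-stable pattern, with scipy's grey erosion standing in for whichever operation the settings select:

import numpy as np
from scipy import ndimage as scind

def iterate_until_stable(image, footprint, count):
    for _ in range(count):
        new_image = scind.grey_erosion(image, footprint=footprint)
        if np.all(new_image == image):
            break  # converged; further repeats would be no-ops
        image = new_image
    return image

pixels = np.random.RandomState(0).uniform(size=(16, 16))
result = iterate_until_stable(pixels, np.ones((3, 3), bool), count=4)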
Example No. 18
0
    def run(self, workspace):
        '''Run the module on the image set'''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(skeleton_name,
                                                       must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels, footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:

            def size_fn(area, is_object):
                return (~is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(combined_skel,
                                                     ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1, :-1] & combined_skel[1:, :-1]
                    & combined_skel[:-1, 1:] & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # emanating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(
                scind.sum(branching_counts, nearby_labels,
                          label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0, ), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(
                scind.sum(branch_points, outside_labels, label_range))
        else:
            branch_counts = np.zeros((0, ), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels,
                                       label_range))
        else:
            end_counts = np.zeros((0, ), int)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join(
            (C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES, skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        #
        # Collect the graph information
        #
        if self.wants_neuron_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_neuron_graph(
                combined_skel, dlabels, trunk_mask,
                branch_points & ~trunk_mask, end_points,
                intensity_image.pixel_data)
            #
            # Add an image number column to both and change vertex index
            # to vertex number (one-based)
            #
            image_number = workspace.measurements.image_set_number
            vertex_graph = np.rec.fromarrays(
                (np.ones(len(vertex_graph)) * image_number,
                 np.arange(1,
                           len(vertex_graph) + 1), vertex_graph['i'],
                 vertex_graph['j'], vertex_graph['labels'],
                 vertex_graph['kind']),
                names=("image_number", "vertex_number", "i", "j", "labels",
                       "kind"))

            edge_graph = np.rec.fromarrays(
                (np.ones(len(edge_graph)) * image_number, edge_graph["v1"],
                 edge_graph["v2"], edge_graph["length"],
                 edge_graph["total_intensity"]),
                names=("image_number", "v1", "v2", "length",
                       "total_intensity"))

            path = self.directory.get_absolute_path(m)
            edge_file = m.apply_metadata(self.edge_file_name.value)
            edge_path = os.path.abspath(os.path.join(path, edge_file))
            vertex_file = m.apply_metadata(self.vertex_file_name.value)
            vertex_path = os.path.abspath(os.path.join(path, vertex_file))
            d = self.get_dictionary(workspace.image_set_list)
            for file_path, table, fmt in ((edge_path, edge_graph,
                                           "%d,%d,%d,%d,%.4f"),
                                          (vertex_path, vertex_graph,
                                           "%d,%d,%d,%d,%d,%s")):
                #
                # Delete files first time through / otherwise append
                #
                if not d.has_key(file_path):
                    d[file_path] = True
                    if os.path.exists(file_path):
                        if workspace.frame is not None:
                            import wx
                            if wx.MessageBox(
                                    "%s already exists. Do you want to overwrite it?"
                                    % file_path,
                                    "Warning: overwriting file",
                                    style=wx.YES_NO,
                                    parent=workspace.frame) != wx.YES:
                                raise ValueError("Can't overwrite %s" %
                                                 file_path)
                        os.remove(file_path)
                    fd = open(file_path, 'wt')
                    header = ','.join(table.dtype.names)
                    fd.write(header + '\n')
                else:
                    fd = open(file_path, 'at')
                np.savetxt(fd, table, fmt)
                fd.close()
                if workspace.frame is not None:
                    workspace.display_data.edge_graph = edge_graph
                    workspace.display_data.vertex_graph = vertex_graph
        #
        # Make the display image
        #
        if workspace.frame is not None or self.wants_branchpoint_image:
            branchpoint_image = np.zeros(
                (skeleton.shape[0], skeleton.shape[1], 3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= .875
            branchpoint_image[dilated_labels != 0, :] += .1
            if workspace.frame:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image, parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
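The ring construction above (dilate the seed labels, erode the dilated labels back, keep only the rim between the two) can be seen in isolation below. This is a sketch: the square footprint stands in for morph.strel_disk(1.5) and the array names are illustrative.

import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

labels = np.zeros((15, 15), int)
labels[5:10, 5:10] = 1

footprint = np.ones((3, 3), int)          # stand-in for morph.strel_disk(1.5)
dilated_labels = grey_dilation(labels, footprint=footprint)
seed_mask = dilated_labels > 0            # seeds grown by one pixel
seed_center = grey_erosion(dilated_labels, footprint=footprint) > 0
ring = seed_mask & ~seed_center           # the one-pixel rim around each seed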
Example No. 19
0
 def fun(im, footprint=None):
     return cpmorph.fill_labeled_holes(im, size_fn=small_hole_fn)
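The size_fn / small_hole_fn callbacks passed to fill_labeled_holes in these examples all share the same shape: they receive a region's area and a flag saying whether that region is foreground, and return True when the region should be filled. A hedged illustration; the 64-pixel cutoff is an arbitrary assumption:

def small_hole_fn(area, is_foreground):
    # Fill only background regions (holes) no larger than 64 pixels.
    return (not is_foreground) and (area <= 64)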
Example No. 20
0
    def run(self, workspace):
        '''Run the module on the image set'''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count,dtype=np.int32)+1
        
        skeleton_image = workspace.image_set.get_image(
            skeleton_name, must_be_binary = True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask
        
        closed_labels = grey_erosion(dilated_labels,
                                     footprint = my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:
            def size_fn(area, is_object):
                return (~ is_object) and (area <= self.maximum_hole_size.value)
            combined_skel = morph.fill_labeled_holes(
                combined_skel, ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1,:-1] & combined_skel[1:,:-1] &
                    combined_skel[:-1,1:] & combined_skel[1:,1:])
        branch_points[:-1,:-1][odd_case] = True
        branch_points[1:,1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # emanating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        branching_counts = np.array([0,0,0,1,2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0
        
        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(scind.sum(branching_counts, nearby_labels, 
                                         label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0,),int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(scind.sum(branch_points, outside_labels, 
                                          label_range))
        else:
            branch_counts = np.zeros((0,),int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels, label_range))
        else:
            end_counts = np.zeros((0,), int)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join((C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES, 
                            skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        #
        # Collect the graph information
        #
        if self.wants_neuron_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_neuron_graph(
                combined_skel, dlabels, 
                trunk_mask,
                branch_points & ~trunk_mask,
                end_points,
                intensity_image.pixel_data)

            image_number = workspace.measurements.image_set_number
            
            edge_path, vertex_path = self.get_graph_file_paths(m, m.image_number)
            workspace.interaction_request(
                self, m.image_number, edge_path, edge_graph,
                vertex_path, vertex_graph, headless_ok = True)
            
            if self.show_window:
                workspace.display_data.edge_graph = edge_graph
                workspace.display_data.vertex_graph = vertex_graph
                workspace.display_data.intensity_image = intensity_image.pixel_data
        #
        # Make the display image
        #
        if self.show_window or self.wants_branchpoint_image:
            branchpoint_image = np.zeros((skeleton.shape[0],
                                          skeleton.shape[1],
                                          3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel,:] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask,:] = 0
            branchpoint_image[trunk_mask,0] = 1
            branchpoint_image[branch_mask,1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0,:] *= .875
            branchpoint_image[dilated_labels != 0,:] += .1
            if self.show_window:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image,
                               parent_image = skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
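The trunk, branch and end counts above all use the same per-object aggregation: scipy.ndimage.sum totals a value image (here a boolean point mask) within each label, returning one number per label index. A minimal numpy/scipy sketch; fix() in the module only wraps the result as an array, so np.asarray plays that role here.

import numpy as np
from scipy import ndimage as scind

branch_points = np.zeros((8, 8), bool)
branch_points[2, 2] = True
branch_points[5, 5] = True

labels = np.zeros((8, 8), int)
labels[:, :4] = 1
labels[:, 4:] = 2

label_range = np.arange(1, labels.max() + 1)
# One sum per label: how many branch points fall inside each object.
branch_counts = np.asarray(scind.sum(branch_points, labels, label_range))
# branch_counts -> array([1., 1.])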
Example No. 21
0
    def run_function(self, function_name, pixel_data, mask, count, scale,
                     custom_repeats):
        '''Apply the function once to the image, returning the result'''
        is_binary =  pixel_data.dtype.kind == 'b'
        strel = morph.strel_disk(scale / 2.0)
        if (function_name in (F_BRANCHPOINTS, F_BRIDGE, F_CLEAN, F_DIAG, 
                              F_CONVEX_HULL, F_DISTANCE, F_ENDPOINTS, F_FILL,
                              F_FILL_SMALL, F_HBREAK, F_LIFE, F_MAJORITY, 
                              F_REMOVE, F_SHRINK, F_SKEL, F_SPUR, F_THICKEN, 
                              F_THIN, F_VBREAK) 
            and not is_binary):
            # Apply a very crude threshold to the image for binary algorithms
            logger.warning("Warning: converting image to binary for %s\n"%
                           function_name)
            pixel_data = pixel_data != 0

        if (function_name in (F_BRANCHPOINTS, F_BRIDGE, F_CLEAN, F_DIAG, 
                              F_CONVEX_HULL, F_DISTANCE, F_ENDPOINTS, F_FILL,
                              F_FILL_SMALL,
                              F_HBREAK, F_INVERT, F_LIFE, F_MAJORITY, F_REMOVE,
                              F_SHRINK,
                              F_SKEL, F_SPUR, F_THICKEN, F_THIN, F_VBREAK) or
            (is_binary and
             function_name in (F_CLOSE, F_DILATE, F_ERODE, F_OPEN))):
            # All of these have an iterations argument or it makes no
            # sense to iterate
            if function_name == F_BRANCHPOINTS:
                return morph.branchpoints(pixel_data, mask)
            elif function_name == F_BRIDGE:
                return morph.bridge(pixel_data, mask, count)
            elif function_name == F_CLEAN:
                return morph.clean(pixel_data, mask, count)
            elif function_name == F_CLOSE:
                if mask is None:
                    return scind.binary_closing(pixel_data,
                                                strel,
                                                iterations = count)
                else:
                    return (scind.binary_closing(pixel_data & mask, 
                                                 strel,
                                                 iterations = count) |
                            (pixel_data & ~ mask))
            elif function_name == F_CONVEX_HULL:
                if mask is None:
                    return morph.convex_hull_image(pixel_data)
                else:
                    return morph.convex_hull_image(pixel_data & mask)
            elif function_name == F_DIAG:
                return morph.diag(pixel_data, mask, count)
            elif function_name == F_DILATE:
                return scind.binary_dilation(pixel_data, 
                                             strel,
                                             iterations=count,
                                             mask=mask)
            elif function_name == F_DISTANCE:
                image = scind.distance_transform_edt(pixel_data)
                img_max = np.max(image)
                if img_max > 0:
                    image = image / img_max
                return image
            elif function_name == F_ENDPOINTS:
                return morph.endpoints(pixel_data, mask)
            elif function_name == F_ERODE:
                return scind.binary_erosion(pixel_data, strel,
                                            iterations = count,
                                            mask = mask)
            elif function_name == F_FILL:
                return morph.fill(pixel_data, mask, count)
            elif function_name == F_FILL_SMALL:
                def small_fn(area, foreground):
                    return (not foreground) and (area <= custom_repeats)
                return morph.fill_labeled_holes(pixel_data, mask, small_fn)
            elif function_name == F_HBREAK:
                return morph.hbreak(pixel_data, mask, count)
            elif function_name == F_INVERT:
                if is_binary:
                    if mask is None:
                        return ~ pixel_data
                    result = pixel_data.copy()
                    result[mask] = ~result[mask]
                    return result
                elif mask is None:
                    return 1-pixel_data
                else:
                    result = pixel_data.copy()
                    result[mask]  = 1-result[mask]
                    return result
            elif function_name == F_LIFE:
                return morph.life(pixel_data, count)
            elif function_name == F_MAJORITY:
                return morph.majority(pixel_data, mask, count)
            elif function_name == F_OPEN:
                if mask is None:
                    return scind.binary_opening(pixel_data,
                                                strel,
                                                iterations = count)
                else:
                    return (scind.binary_opening(pixel_data & mask, 
                                                 strel,
                                                 iterations = count) |
                            (pixel_data & ~ mask))
            elif function_name == F_REMOVE:
                return morph.remove(pixel_data, mask, count)
            elif function_name == F_SHRINK:
                return morph.binary_shrink(pixel_data, count)
            elif function_name == F_SKEL:
                return morph.skeletonize(pixel_data, mask)
            elif function_name == F_SPUR:
                return morph.spur(pixel_data, mask, count)
            elif function_name == F_THICKEN:
                return morph.thicken(pixel_data, mask, count)
            elif function_name == F_THIN:
                return morph.thin(pixel_data, mask, count)
            elif function_name == F_VBREAK:
                return morph.vbreak(pixel_data, mask)
            else:
                raise NotImplementedError("Unimplemented morphological function: %s" %
                                          function_name)
        else:
            for i in range(count):
                if function_name == F_BOTHAT:
                    new_pixel_data = morph.black_tophat(pixel_data, mask=mask,
                                                        footprint=strel)
                elif function_name == F_CLOSE:
                    new_pixel_data = morph.closing(pixel_data, mask=mask,
                                                   footprint=strel)
                elif function_name == F_DILATE:
                    new_pixel_data = morph.grey_dilation(pixel_data, mask=mask,
                                                         footprint=strel)
                elif function_name == F_ERODE:
                    new_pixel_data = morph.grey_erosion(pixel_data, mask=mask,
                                                        footprint=strel)
                elif function_name == F_OPEN:
                    new_pixel_data = morph.opening(pixel_data, mask=mask,
                                                   footprint=strel)
                elif function_name == F_TOPHAT:
                    new_pixel_data = morph.white_tophat(pixel_data, mask=mask,
                                                        footprint=strel)
                else:
                    raise NotImplementedError("Unimplemented morphological function: %s" %
                                              function_name)
                if np.all(new_pixel_data == pixel_data):
                    break
                pixel_data = new_pixel_data
            return pixel_data
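The masked binary morphology used for F_CLOSE and F_OPEN above follows one pattern: run the operation only on in-mask pixels, then OR back the untouched out-of-mask pixels so they pass through unchanged. A sketch of that pattern for closing, under the assumption that pixel_data and mask are boolean arrays:

import numpy as np
from scipy import ndimage as scind

def masked_binary_closing(pixel_data, mask, strel, iterations=1):
    if mask is None:
        return scind.binary_closing(pixel_data, strel, iterations=iterations)
    closed = scind.binary_closing(pixel_data & mask, strel,
                                  iterations=iterations)
    # Pixels outside the mask are returned exactly as they came in.
    return closed | (pixel_data & ~mask)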
Example No. 22
0
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image_name = self.image_name.value
        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale=True)
        workspace.display_data.statistics = []
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        else:
            thresholded_image = self.threshold_image(image_name, workspace)
            has_threshold = True

        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0, :], labels_in[-1, :],
             labels_in[:, 0], labels_in[:, -1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in) + 1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]

        labels_in[(~is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp

        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances, (i, j) = scind.distance_transform_edt(
                    labels_in == 0, return_indices=True)
                labels_out = np.zeros(labels_in.shape, int)
                dilate_mask = distances <= self.distance_to_dilate.value
                labels_out[dilate_mask] =\
                    labels_in[i[dilate_mask],j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in,
                                                  thresholded_image, 1.0)
                labels_out[distances > self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in > 0]
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            #
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in, thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1 - img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out, objects.segmented,
                                   range(np.max(objects.segmented) + 1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image=image)
                workspace.image_set.add(self.new_primary_outlines_name.value,
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out)

        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image=image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add measurements
        #
        measurements = workspace.measurements
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT % objname,
                                     children_per_parent)
        measurements.add_measurement(
            objname, cpmi.FF_PARENT % self.primary_objects.value,
            parents_of_children)
        image_numbers = np.ones(len(parents_of_children), int) *\
            measurements.image_set_number
        mask = parents_of_children > 0
        measurements.add_relate_measurement(
            self.module_num, R_PARENT, self.primary_objects.value,
            self.objects_name.value, image_numbers[mask],
            parents_of_children[mask], image_numbers[mask],
            np.arange(1, len(parents_of_children) + 1)[mask])
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(
                new_objects, self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(
                measurements, self.new_primary_objects_name.value,
                np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(
                measurements, self.new_primary_objects_name.value,
                new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value, new_objects,
                 self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value, objects_out,
                 objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(
                    parent_name, cpmi.FF_CHILDREN_COUNT % child_name,
                    children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT % parent_name,
                                             parents_of_children)
        if self.show_window:
            object_area = np.sum(segmented_out > 0)
            workspace.display_data.object_pct = \
                100 * object_area / np.product(segmented_out.shape)
            workspace.display_data.img = img
            workspace.display_data.segmented_out = segmented_out
            workspace.display_data.primary_labels = objects.segmented
            workspace.display_data.global_threshold = global_threshold
            workspace.display_data.object_count = object_count
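The M_WATERSHED_G branch above grows the primary labels over a gradient (Sobel) image so that object boundaries settle where intensity changes fastest, restricted to the thresholded mask. Below is a standalone sketch of that idea; skimage.segmentation.watershed is used purely as a stand-in for whichever watershed implementation the module imports, and the mean-based threshold is a placeholder for the thresholded image.

import numpy as np
from scipy import ndimage as scind
from skimage.segmentation import watershed  # stand-in watershed implementation

img = np.random.RandomState(0).uniform(size=(64, 64))
seeds = np.zeros((64, 64), int)
seeds[16, 16] = 1
seeds[48, 48] = 2

sobel_image = np.abs(scind.sobel(img))             # gradient magnitude along one axis
watershed_mask = (img > img.mean()) | (seeds > 0)  # placeholder for the thresholded image
labels_out = watershed(sobel_image, seeds, mask=watershed_mask)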
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image_name = self.image_name.value
        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale = True)
        workspace.display_data.statistics = []
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        else:
            thresholded_image = self.threshold_image(image_name, workspace)
            has_threshold = True
        
        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0,:], labels_in[-1,:], labels_in[:,0], labels_in[:,-1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in)+1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]
        
        labels_in[(~ is_touching) & (objects.segmented == 0)] = 0
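        # Labels that touch the image border are kept here even if they were
        # edited out of objects.segmented, so they can still seed secondary
        # objects and be handled by the discard-on-edge logic below.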
        #
        # Pad or crop the input labels to match the image size. Any area not
        # covered by the label matrix gets no label.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp
        
        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances,(i,j) = scind.distance_transform_edt(labels_in == 0, 
                                                               return_indices = True)
                labels_out = np.zeros(labels_in.shape,int)
                dilate_mask = distances <= self.distance_to_dilate.value 
                labels_out[dilate_mask] =\
                    labels_in[i[dilate_mask],j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in, 
                                                  thresholded_image,
                                                  1.0)
                labels_out[distances>self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in>0] 
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            # 
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in, 
                                             thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the Sobel filter to the image to estimate the
            # intensity gradient, so the watershed boundaries follow edges.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image, 
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1-img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img, 
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                                objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out,
                                   objects.segmented,
                                   range(np.max(objects.segmented)+1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
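            # lookup now maps each original primary label to a compact new
            # label; primaries whose secondary object was discarded map to 0.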
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image = image)
                workspace.image_set.add(self.new_primary_outlines_name.value, 
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out)

        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image = image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add measurements
        #
        measurements = workspace.measurements
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT%objname,
                                     children_per_parent)
        measurements.add_measurement(objname,
                                     cpmi.FF_PARENT%self.primary_objects.value,
                                     parents_of_children)
        image_numbers = np.ones(len(parents_of_children), int) *\
            measurements.image_set_number
        mask = parents_of_children > 0
        measurements.add_relate_measurement(
            self.module_num, R_PARENT,
            self.primary_objects.value, self.objects_name.value,
            image_numbers[mask], parents_of_children[mask],
            image_numbers[mask], 
            np.arange(1, len(parents_of_children) + 1)[mask])
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(new_objects,
                                             self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(measurements,
                                               self.new_primary_objects_name.value,
                                               np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(measurements,
                                                  self.new_primary_objects_name.value,
                                                  new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value,
                 new_objects, self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value,
                 objects_out, objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(parent_name,
                                             cpmi.FF_CHILDREN_COUNT%child_name,
                                             children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT%parent_name,
                                             parents_of_children)
        if self.show_window:
            object_area = np.sum(segmented_out > 0)
            workspace.display_data.object_pct = \
                100 * object_area / np.product(segmented_out.shape)
            workspace.display_data.img = img
            workspace.display_data.segmented_out = segmented_out
            workspace.display_data.primary_outline = primary_outline
            workspace.display_data.secondary_outline = secondary_outline
            workspace.display_data.global_threshold = global_threshold
            workspace.display_data.object_count = object_count
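
Of the strategies dispatched on self.method above, M_DISTANCE_N is the simplest: the primary labels are grown outward by a fixed number of pixels, and every background pixel within that distance takes the label of its nearest primary. A minimal, self-contained sketch of that one step, assuming only numpy and scipy.ndimage (the function name is mine):

import numpy as np
from scipy import ndimage

def dilate_labels_by_distance(labels_in, max_distance):
    # Grow each labeled region outward by up to max_distance pixels; each
    # nearby background pixel inherits the label of its nearest object.
    distances, (i, j) = ndimage.distance_transform_edt(
        labels_in == 0, return_indices=True)
    labels_out = np.zeros_like(labels_in)
    within = distances <= max_distance
    labels_out[within] = labels_in[i[within], j[within]]
    return labels_out

seeds = np.zeros((7, 7), int)
seeds[1, 1] = 1
seeds[5, 5] = 2
print(dilate_labels_by_distance(seeds, 2))

The other branches differ only in how labels_out is produced - intensity-guided propagation, or a seeded watershed on the Sobel gradient or on the inverted image - after which the same fill_labeled_holes / filter_labels post-processing applies.
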
Example n. 24
0
    def run(self, workspace):
        '''Run the module on the image set'''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(skeleton_name,
                                                       must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels, footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
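        # combined_skel is now the skeleton outside the seeds plus a thin ring
        # around each seed where the trunks attach.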
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:

            def size_fn(area, is_object):
                return (~is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(combined_skel,
                                                     ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet in a solid 2x2 block like this,
        # the branchpoint detector leaves it unmarked because the choice is
        # arbitrary, so mark the two diagonal pixels ourselves.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1, :-1] & combined_skel[1:, :-1]
                    & combined_skel[:-1, 1:] & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # emanating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
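        # The lookup table turns the raw branching count into "extra" branches:
        # values 0-2 (at most a simple line through the pixel) map to 0,
        # 3 maps to 1 and 4 maps to 2.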
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(
                scind.sum(branching_counts, nearby_labels,
                          label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0, ), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(
                scind.sum(branch_points, outside_labels, label_range))
        else:
            branch_counts = np.zeros((0, ), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels,
                                       label_range))
        else:
            end_counts = np.zeros((0, ), int)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join(
            (C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES, skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        #
        # Collect the graph information
        #
        if self.wants_neuron_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_neuron_graph(
                combined_skel, dlabels, trunk_mask,
                branch_points & ~trunk_mask, end_points,
                intensity_image.pixel_data)

            image_number = workspace.measurements.image_set_number

            edge_path, vertex_path = self.get_graph_file_paths(
                m, m.image_number)
            workspace.interaction_request(self,
                                          m.image_number,
                                          edge_path,
                                          edge_graph,
                                          vertex_path,
                                          vertex_graph,
                                          headless_ok=True)

            if self.show_window:
                workspace.display_data.edge_graph = edge_graph
                workspace.display_data.vertex_graph = vertex_graph
                workspace.display_data.intensity_image = intensity_image.pixel_data
        #
        # Make the display image
        #
        if self.show_window or self.wants_branchpoint_image:
            branchpoint_image = np.zeros(
                (skeleton.shape[0], skeleton.shape[1], 3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= .875
            branchpoint_image[dilated_labels != 0, :] += .1
            if self.show_window:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image, parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
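
The trunk/branch/endpoint counting above relies on CellProfiler's morphology helpers (morph.branchpoints, morph.endpoints, morph.branchings) together with scipy.ndimage.sum to aggregate the counts per seed object. The sketch below reproduces only the per-object aggregation, using a much cruder neighbour-count rule in place of those helpers, so treat it as an illustration of the bookkeeping rather than of the exact branchpoint detection:

import numpy as np
from scipy import ndimage

def count_skeleton_features(skeleton, labels):
    # Crude rule: a skeleton pixel with >= 3 eight-connected skeleton
    # neighbours counts as a branchpoint, one with exactly 1 as an endpoint.
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]])
    neighbours = ndimage.convolve(skeleton.astype(int), kernel,
                                  mode="constant") * skeleton
    branch_points = neighbours >= 3
    end_points = neighbours == 1
    index = np.arange(1, int(labels.max()) + 1)
    branch_counts = np.asarray(ndimage.sum(branch_points, labels, index), int)
    end_counts = np.asarray(ndimage.sum(end_points, labels, index), int)
    return branch_counts, end_counts

skel = np.zeros((5, 5), bool)
skel[2, :] = True   # horizontal stroke
skel[:, 2] = True   # vertical stroke crossing it at (2, 2)
labels = np.ones((5, 5), int)
print(count_skeleton_features(skel, labels))
# -> (array([5]), array([4])): the crude rule flags the crossing and its four
#    neighbours as branchpoints, and the four stroke tips as endpoints.
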
Example n. 25
0
 def run(self, workspace):
     import cellprofiler.modules.identify as I
     #
     # Get the input and output image names. You need to get the .value
     # because otherwise you'll get the setting object instead of
     # the string name.
     #
     input_image_name = self.input_image_name.value
     prediction_image_name = self.prediction_image_name.value
     input_objects_name = self.input_objects.value
     output_objects_name = self.output_objects.value
     #
     # Get the image set. The image set has all of the images in it.
     # The assert statement makes sure that it really is an image set,
     # but, more importantly, it lets my editor do context-sensitive
     # completion for the image set.
     #
     image_set = workspace.image_set
     assert isinstance(image_set, cpi.ImageSet)
     #
     # Get the input image object. We want a grayscale image here.
     # The image set will convert a color image to a grayscale one
     # and warn the user.
     #
     input_image = image_set.get_image(input_image_name,
                                       must_be_grayscale = True).pixel_data
     #
     # I do something a little odd here and elsewhere: I normalize the
     # brightness by rank-ordering the image pixels. The samples vary in
     # intensity from image to image (I am not sure why), but EM is a
     # scanning technology, so illumination should be roughly uniform within
     # an image; rank normalization removes the per-image variation.
     #
     r = np.random.RandomState()
     r.seed(np.sum((input_image * 65535).astype(np.uint16)))
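     # Seeding the generator from the image content makes the random
     # tie-breaking in the rank ordering below reproducible per image.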
     npixels = np.prod(input_image.shape)
     shape = input_image.shape
     order = np.lexsort([r.uniform(size=npixels),
                         input_image.flatten()])
     input_image = np.zeros(npixels)
     input_image[order] = np.arange(npixels).astype(float) / npixels
     input_image.shape = shape
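     # input_image now holds pixel ranks scaled to [0, 1): a rank-normalized
     # image with a flat histogram.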
     
     prediction_image = image_set.get_image(prediction_image_name,
                                            must_be_grayscale=True).pixel_data
     object_set = workspace.object_set
     assert isinstance(object_set, cpo.ObjectSet)
     input_objects = object_set.get_objects(input_objects_name)
     assert isinstance(input_objects, cpo.Objects)
     input_labeling = input_objects.segmented
     #
     # Find the border pixels - 4 connected
     #
     # There will be some repeats in here - I'm being lazy and I'm not
     # removing them. The inaccuracies will be random.
     #
     touch = ((input_labeling[1:, :] != input_labeling[:-1, :]) &
              (input_labeling[1:, :] != 0) &
              (input_labeling[:-1, :] != 0))
     touchpair = np.argwhere(touch)
     touchpair = np.column_stack([
         # touchpair[:,0:2] are the left coords
         touchpair, 
         # touchpair[:,2:4] are the right coords
         touchpair + np.array([1,0], int)[np.newaxis,:],
         # touchpair[:,4] is the identity of the left object
         input_labeling[touchpair[:,0], touchpair[:, 1]],
         # touchpair[:,5] is the identity of the right object
         input_labeling[touchpair[:,0]+1, touchpair[:, 1]]])
     TP_I0 = 0
     TP_J0 = 1
     TP_I1 = 2
     TP_J1 = 3
     TP_L0 = 4
     TP_L1 = 5
         
     touch = ((input_labeling[:, 1:] != input_labeling[:, :-1]) &
               (input_labeling[:, 1:] != 0) &
               (input_labeling[:, :-1] != 0))
     tp2 = np.argwhere(touch)
     
     touchpair = np.vstack([touchpair, np.column_stack([
         tp2, 
         tp2 + np.array([0, 1], int)[np.newaxis,:],
         input_labeling[tp2[:, 0], tp2[:, 1]],
         input_labeling[tp2[:, 0], tp2[:, 1]+1]])])
     if np.any(touch):
         #
         # Broadcast the touchpair counts and scores into sparse arrays.
         # The sparse array convention is to sum duplicates
         #
         counts = coo_matrix(
             (np.ones(touchpair.shape[0], int),
              (touchpair[:,TP_L0], touchpair[:,TP_L1])),
             shape=[input_objects.count+1]*2).toarray()
         scores = coo_matrix(
             (prediction_image[touchpair[:, TP_I0],
                               touchpair[:, TP_J0]] +
              prediction_image[touchpair[:, TP_I1],
                               touchpair[:, TP_J1]],
              (touchpair[:,TP_L0], touchpair[:,TP_L1])),
             shape=[input_objects.count+1]*2).toarray() / 2.0
         scores = scores / counts
         to_remove = ((counts > self.min_border.value) &
                      (scores > 1 - self.min_support.value))
     else:
         to_remove = np.zeros((0,2), bool)
     #
     # For all_connected_components, do forward and backward links and
     # self-to-self links
     #
     remove_pairs = np.vstack([
         np.argwhere(to_remove), 
         np.argwhere(to_remove.transpose()),
         np.column_stack([np.arange(np.max(input_labeling)+1)] * 2)])
     #
     # Find small objects and dark objects
     #
     areas = np.bincount(input_labeling.flatten())
     brightness = np.bincount(input_labeling.flatten(), input_image.flatten())
     brightness = brightness / areas
     #
     to_remove = ((areas < self.min_area.value) | 
                  (brightness < self.darkness.value))
     to_remove[0] = False
     #
     # Find the biggest neighbor to all. If no neighbors, label = 0
     #
     largest = np.zeros(areas.shape[0], np.uint32)
     if np.any(touch):
         largest[:counts.shape[1]] = \
             np.argmax(np.maximum(counts, counts.transpose()), 0)
     remove_pairs = np.vstack([
         remove_pairs,
         np.column_stack([np.arange(len(to_remove))[to_remove],
                          largest[to_remove]])])
     lnumbers = all_connected_components(remove_pairs[:, 0], 
                                         remove_pairs[:, 1]).astype(np.uint32)
     #
     # Renumber.
     #
     output_labeling = lnumbers[input_labeling]
     #
     # Fill holes.
     #
     output_labeling = fill_labeled_holes(output_labeling)
     #
     # Remove the border pixels. This is for the challenge which requires
     # a mask. 
     #
     can_remove = lnumbers[touchpair[:, TP_L0]] != lnumbers[touchpair[:, TP_L1]]
     output_labeling[touchpair[can_remove, TP_I0], 
                     touchpair[can_remove, TP_J0]] = 0
     output_labeling[touchpair[can_remove, TP_I1], 
                     touchpair[can_remove, TP_J1]] = 0
     output_objects = cpo.Objects()
     output_objects.segmented = output_labeling
     object_set.add_objects(output_objects, output_objects_name)
     nobjects = np.max(output_labeling)
     I.add_object_count_measurements(workspace.measurements,
                                     output_objects_name,
                                     nobjects)
     I.add_object_location_measurements(workspace.measurements,
                                        output_objects_name, 
                                        output_labeling, nobjects)
     # Make an outline image
     outline_image = cellprofiler.cpmath.outline.outline(output_labeling).astype(bool)
     out_img = cpi.Image(outline_image,
                         parent_image = image_set.get_image(input_image_name))
     workspace.image_set.add(self.output_outlines_name.value, out_img)
     
     workspace.display_data.input_pixels = input_image
     workspace.display_data.input_labels = input_labeling
     workspace.display_data.output_labels = output_labeling
     workspace.display_data.outlines = outline_image
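
Two pieces do the heavy lifting in this example: coo_matrix's convention of summing duplicate entries accumulates the per-boundary pixel counts and scores, and all_connected_components collapses the selected label pairs into merged objects. A rough stand-in for the merging step, using scipy.sparse.csgraph instead of CellProfiler's all_connected_components (the function name is mine and the details may differ):

import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components

def merge_labels(labels, merge_pairs):
    # Collapse the label pairs listed in merge_pairs (rows of [a, b]) into
    # single objects and renumber, keeping background (label 0) as 0.
    n = int(labels.max()) + 1
    graph = coo_matrix(
        (np.ones(len(merge_pairs), int),
         (merge_pairs[:, 0], merge_pairs[:, 1])), shape=(n, n))
    _, component = connected_components(graph, directed=False)
    lut = np.zeros(component.max() + 1, int)
    foreground = np.setdiff1d(np.unique(component), [component[0]])
    lut[foreground] = np.arange(1, len(foreground) + 1)
    return lut[component][labels]

labels = np.array([[1, 1, 2, 2],
                   [0, 0, 3, 3]])
pairs = np.array([[1, 2]])   # objects 1 and 2 scored as a single object
print(merge_labels(labels, pairs))
# -> [[1 1 1 1]
#     [0 0 2 2]]

In the module above, remove_pairs plays the role of merge_pairs (with reverse and self links added), and output_labeling = lnumbers[input_labeling] is the same lookup-table relabeling.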