Example #1
    def run(self, workspace):
        """Find the outlines on the current image set

        workspace    - The workspace contains
            pipeline     - instance of cpp for this run
            image_set    - the images in the image set being processed
            object_set   - the objects (labeled masks) in this image set
            measurements - the measurements for this run
            frame        - the parent frame to whatever frame is created. None means don't draw.
        """
        gridding = workspace.get_grid(self.grid_name.value)
        if self.shape_choice == SHAPE_RECTANGLE:
            labels = self.run_rectangle(workspace, gridding)
        elif self.shape_choice == SHAPE_CIRCLE_FORCED:
            labels = self.run_forced_circle(workspace, gridding)
        elif self.shape_choice == SHAPE_CIRCLE_NATURAL:
            labels = self.run_natural_circle(workspace, gridding)
        elif self.shape_choice == SHAPE_NATURAL:
            labels = self.run_natural(workspace, gridding)
        else:
            raise ValueError(
                "Unsupported shape choice: %s" % self.shape_choice.value)
        objects = cpo.Objects()
        objects.segmented = labels
        object_count = gridding.rows * gridding.columns
        workspace.object_set.add_objects(objects, self.output_objects_name.value)
        add_object_location_measurements(
            workspace.measurements, self.output_objects_name.value, labels, object_count
        )
        add_object_count_measurements(
            workspace.measurements, self.output_objects_name.value, object_count
        )
        if self.show_window:
            workspace.display_data.gridding = gridding
            workspace.display_data.labels = labels
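
The rectangle branch above delegates to self.run_rectangle, which is not shown. As a rough standalone illustration of that kind of labeling, the sketch below divides an image evenly into rows x columns rectangles with plain NumPy; rectangle_grid_labels is a hypothetical helper and ignores CellProfiler's gridding object entirely.

import numpy as np

def rectangle_grid_labels(shape, rows, columns):
    # Label an image-shaped array as a rows-by-columns grid of rectangles.
    # Cell (r, c) gets label r * columns + c + 1, so labels run from
    # 1 to rows * columns -- the same object_count recorded above.
    ii, jj = np.mgrid[0:shape[0], 0:shape[1]]
    row_index = (ii * rows) // shape[0]      # grid row of each pixel
    col_index = (jj * columns) // shape[1]   # grid column of each pixel
    return (row_index * columns + col_index + 1).astype(np.int32)

labels = rectangle_grid_labels((6, 8), rows=2, columns=4)
assert labels.max() == 2 * 4                 # 8 grid objects
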
    def run(self, workspace):
        input_objects = workspace.object_set.get_objects(
            self.object_name.value)
        output_objects = cpo.Objects()
        output_objects.segmented = self.do_labels(input_objects.segmented)
        if input_objects.has_small_removed_segmented:
            output_objects.small_removed_segmented = self.do_labels(
                input_objects.small_removed_segmented)
        if input_objects.has_unedited_segmented:
            output_objects.unedited_segmented = self.do_labels(
                input_objects.unedited_segmented)
        workspace.object_set.add_objects(output_objects,
                                         self.output_object_name.value)
        add_object_count_measurements(
            workspace.measurements,
            self.output_object_name.value,
            np.max(output_objects.segmented),
        )
        add_object_location_measurements(
            workspace.measurements,
            self.output_object_name.value,
            output_objects.segmented,
        )

        if self.show_window:
            workspace.display_data.input_objects_segmented = input_objects.segmented
            workspace.display_data.output_objects_segmented = output_objects.segmented
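
do_labels is defined elsewhere in this module and is not shown; the point of the run() above is that the same relabeling is applied uniformly to the segmented, small_removed_segmented, and unedited_segmented variants. A minimal stand-in for do_labels (an assumption, not the module's real implementation) that simply re-runs 8-connected component labeling:

import numpy as np
import scipy.ndimage as scind

def do_labels(labels):
    # Hypothetical stand-in: relabel the 8-connected components of the
    # foreground, returning a fresh label matrix.
    relabeled, _ = scind.label(labels > 0, np.ones((3, 3), bool))
    return relabeled

segmented = np.array([[1, 1, 0, 0],
                      [0, 0, 0, 2],
                      [0, 0, 2, 2]])
print(do_labels(segmented))   # two 8-connected components -> labels 1 and 2
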
    def run(self, workspace):
        image_name = self.image_name.value
        objects_name = self.objects_name.value
        image = workspace.image_set.get_image(image_name)
        pixel_data = image.pixel_data

        labels = workspace.interaction_request(
            self, pixel_data, workspace.measurements.image_set_number
        )
        if labels is None:
            # User cancelled. Soldier on as best we can.
            workspace.cancel_request()
            labels = np.zeros(pixel_data.shape[:2], int)
        objects = cpo.Objects()
        objects.segmented = labels
        workspace.object_set.add_objects(objects, objects_name)

        ##################
        #
        # Add measurements
        #
        m = workspace.measurements
        #
        # The object count
        #
        object_count = np.max(labels)
        I.add_object_count_measurements(m, objects_name, object_count)
        #
        # The object locations
        #
        I.add_object_location_measurements(m, objects_name, labels)

        workspace.display_data.labels = labels
        workspace.display_data.pixel_data = pixel_data
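
add_object_count_measurements and add_object_location_measurements are CellProfiler's identify helpers; the arithmetic behind them is simple: the count is the maximum label and the locations are per-object centroids. A small sketch of that arithmetic with scipy.ndimage (the actual feature names and storage are handled by CellProfiler and omitted here):

import numpy as np
import scipy.ndimage as scind

labels = np.zeros((10, 10), int)
labels[1:4, 1:4] = 1
labels[6:9, 5:9] = 2

object_count = int(np.max(labels))           # the object count, as above
indexes = np.arange(1, object_count + 1)
# Unweighted centroid of each label; center_of_mass returns (i, j) pairs.
centers = scind.center_of_mass(np.ones_like(labels), labels, indexes)
center_y, center_x = np.array(centers).T
print(object_count, center_x, center_y)
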
Example #4
    def run(self, workspace):
        objects_name = self.objects_name.value
        objects = workspace.object_set.get_objects(objects_name)
        assert isinstance(objects, cpo.Objects)
        labels = objects.segmented
        if self.relabel_option == OPTION_SPLIT:
            output_labels, count = scind.label(labels > 0, np.ones((3, 3),
                                                                   bool))
        else:
            if self.merge_option == UNIFY_DISTANCE:
                mask = labels > 0
                if self.distance_threshold.value > 0:
                    #
                    # Take the distance transform of the reverse of the mask
                    # and figure out what points are less than 1/2 of the
                    # distance from an object.
                    #
                    d = scind.distance_transform_edt(~mask)
                    mask = d < self.distance_threshold.value / 2 + 1
                output_labels, count = scind.label(mask, np.ones((3, 3), bool))
                output_labels[labels == 0] = 0
                if self.wants_image:
                    output_labels = self.filter_using_image(workspace, mask)
            elif self.merge_option == UNIFY_PARENT:
                parents_name = self.parent_object.value
                parents_of = workspace.measurements[objects_name, "_".join(
                    (C_PARENT, parents_name))]
                output_labels = labels.copy().astype(np.uint32)
                output_labels[labels > 0] = parents_of[labels[labels > 0] - 1]
                if self.merging_method == UM_CONVEX_HULL:
                    ch_pts, n_pts = morph.convex_hull(output_labels)
                    ijv = morph.fill_convex_hulls(ch_pts, n_pts)
                    output_labels[ijv[:, 0], ijv[:, 1]] = ijv[:, 2]

        output_objects = cpo.Objects()
        output_objects.segmented = output_labels
        if objects.has_small_removed_segmented:
            output_objects.small_removed_segmented = copy_labels(
                objects.small_removed_segmented, output_labels)
        if objects.has_unedited_segmented:
            output_objects.unedited_segmented = copy_labels(
                objects.unedited_segmented, output_labels)
        output_objects.parent_image = objects.parent_image
        workspace.object_set.add_objects(output_objects,
                                         self.output_objects_name.value)

        measurements = workspace.measurements
        add_object_count_measurements(
            measurements,
            self.output_objects_name.value,
            np.max(output_objects.segmented),
        )
        add_object_location_measurements(measurements,
                                         self.output_objects_name.value,
                                         output_objects.segmented)

        #
        # Relate the output objects to the input ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = objects.relate_children(
            output_objects)
        measurements.add_measurement(
            self.objects_name.value,
            FF_CHILDREN_COUNT % self.output_objects_name.value,
            children_per_parent,
        )
        measurements.add_measurement(
            self.output_objects_name.value,
            FF_PARENT % self.objects_name.value,
            parents_of_children,
        )

        if self.show_window:
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.output_labels = output_objects.segmented
            if self.merge_option == UNIFY_PARENT:
                workspace.display_data.parent_labels = workspace.object_set.get_objects(
                    self.parent_object.value).segmented
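
The UNIFY_DISTANCE branch merges objects whose edges come within the distance threshold of one another: it thresholds the distance transform of the background, re-labels the grown mask, and restricts the merged labels back to the original footprints. A self-contained sketch of just that step (without the optional image-based filtering or convex-hull merging):

import numpy as np
import scipy.ndimage as scind

def unify_by_distance(labels, distance_threshold):
    # Merge labeled objects whose edges lie within distance_threshold of
    # each other, mirroring the UNIFY_DISTANCE branch above.
    mask = labels > 0
    if distance_threshold > 0:
        # Distance from each background pixel to the nearest object; growing
        # each object by half the threshold makes nearby objects touch.
        d = scind.distance_transform_edt(~mask)
        mask = d < distance_threshold / 2 + 1
    merged, _ = scind.label(mask, np.ones((3, 3), bool))
    merged[labels == 0] = 0                  # keep the original footprints only
    return merged

labels = np.zeros((1, 9), int)
labels[0, 0:2] = 1
labels[0, 6:8] = 2
print(unify_by_distance(labels, 8))          # both segments end up with label 1
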
Example #5
    def run(self, workspace):
        """Run the module on the current data set

        workspace - has the current image set, object set, measurements
                    and the parent frame for the application if the module
                    is allowed to display. If the module should not display,
                    workspace.frame is None.
        """
        #
        # The object set holds "objects". Each of these is a container
        # for holding up to three kinds of image labels.
        #
        object_set = workspace.object_set
        #
        # Get the primary objects (the centers to be removed).
        # Get the string value out of primary_object_name.
        #
        primary_objects = object_set.get_objects(
            self.primary_objects_name.value)
        #
        # Get the cleaned-up labels image
        #
        primary_labels = primary_objects.segmented
        #
        # Do the same with the secondary object
        secondary_objects = object_set.get_objects(
            self.secondary_objects_name.value)
        secondary_labels = secondary_objects.segmented
        #
        # If one of the two label images is smaller than the other, we
        # try to find the cropping mask and we apply that mask to the larger
        #
        try:
            if any([
                    p_size < s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                #
                # Look for a cropping mask associated with the primary_labels
                # and apply that mask to resize the secondary labels
                #
                secondary_labels = primary_objects.crop_image_similarly(
                    secondary_labels)
                tertiary_image = primary_objects.parent_image
            elif any([
                    p_size > s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                primary_labels = secondary_objects.crop_image_similarly(
                    primary_labels)
                tertiary_image = secondary_objects.parent_image
            elif secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
        except ValueError:
            # No suitable cropping - resize all to fit the secondary
            # labels which are the most critical.
            #
            primary_labels, _ = cpo.size_similarly(secondary_labels,
                                                   primary_labels)
            if secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
                if tertiary_image is not None:
                    tertiary_image, _ = cpo.size_similarly(
                        secondary_labels, tertiary_image)
        # If size/shape differences were too extreme, raise an error.
        if primary_labels.shape != secondary_labels.shape:
            raise ValueError(
                "This module requires that the object sets have matching widths and matching heights.\n"
                "The %s and %s objects do not (%s vs %s).\n"
                "If they are paired correctly you may want to use the ResizeObjects module "
                "to make them the same size." % (
                    self.secondary_objects_name,
                    self.primary_objects_name,
                    secondary_labels.shape,
                    primary_labels.shape,
                ))

        #
        # Find the outlines of the primary image and use this to shrink the
        # primary image by one. This guarantees that there is something left
        # of the secondary image after subtraction
        #
        primary_outline = outline(primary_labels)
        tertiary_labels = secondary_labels.copy()
        if self.shrink_primary:
            primary_mask = np.logical_or(primary_labels == 0, primary_outline)
        else:
            primary_mask = primary_labels == 0
        tertiary_labels[~primary_mask] = 0
        #
        # Get the outlines of the tertiary image
        #
        tertiary_outlines = outline(tertiary_labels) != 0
        #
        # Make the tertiary objects container
        #
        tertiary_objects = cpo.Objects()
        tertiary_objects.segmented = tertiary_labels
        tertiary_objects.parent_image = tertiary_image
        #
        # Relate tertiary objects to their parents & record
        #
        child_count_of_secondary, secondary_parents = secondary_objects.relate_children(
            tertiary_objects)
        if self.shrink_primary:
            child_count_of_primary, primary_parents = primary_objects.relate_children(
                tertiary_objects)
        else:
            # Primary and tertiary don't overlap.
            # Establish overlap between primary and secondary and commute
            _, secondary_of_primary = secondary_objects.relate_children(
                primary_objects)
            mask = secondary_of_primary != 0
            child_count_of_primary = np.zeros(mask.shape, int)
            child_count_of_primary[mask] = child_count_of_secondary[
                secondary_of_primary[mask] - 1]
            primary_parents = np.zeros(secondary_parents.shape,
                                       secondary_parents.dtype)
            primary_of_secondary = np.zeros(secondary_objects.count + 1, int)
            primary_of_secondary[secondary_of_primary] = np.arange(
                1,
                len(secondary_of_primary) + 1)
            primary_of_secondary[0] = 0
            primary_parents = primary_of_secondary[secondary_parents]
        #
        # Write out the objects
        #
        workspace.object_set.add_objects(tertiary_objects,
                                         self.subregion_objects_name.value)
        #
        # Write out the measurements
        #
        m = workspace.measurements
        #
        # The parent/child associations
        #
        for parent_objects_name, parents_of, child_count, relationship in (
            (
                self.primary_objects_name,
                primary_parents,
                child_count_of_primary,
                R_REMOVED,
            ),
            (
                self.secondary_objects_name,
                secondary_parents,
                child_count_of_secondary,
                R_PARENT,
            ),
        ):
            m.add_measurement(
                self.subregion_objects_name.value,
                cellprofiler_core.measurement.FF_PARENT %
                parent_objects_name.value,
                parents_of,
            )
            m.add_measurement(
                parent_objects_name.value,
                cellprofiler_core.measurement.FF_CHILDREN_COUNT %
                self.subregion_objects_name.value,
                child_count,
            )
            mask = parents_of != 0
            image_number = np.ones(np.sum(mask), int) * m.image_set_number
            child_object_number = np.argwhere(mask).flatten() + 1
            parent_object_number = parents_of[mask]
            m.add_relate_measurement(
                self.module_num,
                relationship,
                parent_objects_name.value,
                self.subregion_objects_name.value,
                image_number,
                parent_object_number,
                image_number,
                child_object_number,
            )

        object_count = tertiary_objects.count
        #
        # The object count
        #
        cpmi.add_object_count_measurements(workspace.measurements,
                                           self.subregion_objects_name.value,
                                           object_count)
        #
        # The object locations
        #
        cpmi.add_object_location_measurements(
            workspace.measurements, self.subregion_objects_name.value,
            tertiary_labels)

        if self.show_window:
            workspace.display_data.primary_labels = primary_labels
            workspace.display_data.secondary_labels = secondary_labels
            workspace.display_data.tertiary_labels = tertiary_labels
            workspace.display_data.tertiary_outlines = tertiary_outlines
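
The key step above is removing a one-pixel-shrunken primary object from each secondary object, so that a tertiary region (for example, cytoplasm = cell minus nucleus) always keeps at least a one-pixel rim. The module uses centrosome's outline() for the shrink; the sketch below only approximates the same idea with a grey-scale erosion and is a rough equivalent, not the module's code.

import numpy as np
import scipy.ndimage as scind

def tertiary_from(primary_labels, secondary_labels):
    # Erode each primary object by one pixel to get its interior, then
    # subtract that interior from the secondary labels so a one-pixel rim
    # of each primary object survives in the tertiary region.
    eroded = scind.grey_erosion(primary_labels, size=(3, 3))
    interior = np.where(eroded == primary_labels, primary_labels, 0)
    tertiary = secondary_labels.copy()
    tertiary[interior > 0] = 0
    return tertiary

secondary = np.ones((7, 7), int)             # one big "cell"
primary = np.zeros((7, 7), int)
primary[2:5, 2:5] = 1                        # its "nucleus"
print(tertiary_from(primary, secondary))     # only the nucleus interior is removed
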
Example #6
    def run(self, workspace):
        """Run the module on an image set"""

        object_name = self.object_name.value
        remaining_object_name = self.remaining_objects.value
        original_objects = workspace.object_set.get_objects(object_name)

        if self.mask_choice == MC_IMAGE:
            mask = workspace.image_set.get_image(self.masking_image.value,
                                                 must_be_binary=True)
            mask = mask.pixel_data
        else:
            masking_objects = workspace.object_set.get_objects(
                self.masking_objects.value)
            mask = masking_objects.segmented > 0
        if self.wants_inverted_mask:
            mask = ~mask
        #
        # Load the labels
        #
        labels = original_objects.segmented.copy()
        nobjects = np.max(labels)
        #
        # Resize the mask to cover the objects
        #
        mask, m1 = cpo.size_similarly(labels, mask)
        mask[~m1] = False
        #
        # Apply the mask according to the overlap choice.
        #
        if nobjects == 0:
            pass
        elif self.overlap_choice == P_MASK:
            labels = labels * mask
        else:
            pixel_counts = fix(
                scind.sum(mask, labels,
                          np.arange(1, nobjects + 1, dtype=np.int32)))
            if self.overlap_choice == P_KEEP:
                keep = pixel_counts > 0
            else:
                total_pixels = fix(
                    scind.sum(
                        np.ones(labels.shape),
                        labels,
                        np.arange(1, nobjects + 1, dtype=np.int32),
                    ))
                if self.overlap_choice == P_REMOVE:
                    keep = pixel_counts == total_pixels
                elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                    fraction = self.overlap_fraction.value
                    keep = pixel_counts / total_pixels >= fraction
                else:
                    raise NotImplementedError(
                        "Unknown overlap-handling choice: %s"
                        % self.overlap_choice.value)
            keep = np.hstack(([False], keep))
            labels[~keep[labels]] = 0
        #
        # Renumber the labels matrix if requested
        #
        if self.retain_or_renumber == R_RENUMBER:
            unique_labels = np.unique(labels[labels != 0])
            indexer = np.zeros(nobjects + 1, int)
            indexer[unique_labels] = np.arange(1, len(unique_labels) + 1)
            labels = indexer[labels]
            parent_objects = unique_labels
        else:
            parent_objects = np.arange(1, nobjects + 1)
        #
        # Add the objects
        #
        remaining_objects = cpo.Objects()
        remaining_objects.segmented = labels
        remaining_objects.unedited_segmented = original_objects.unedited_segmented
        workspace.object_set.add_objects(remaining_objects,
                                         remaining_object_name)
        #
        # Add measurements
        #
        m = workspace.measurements
        m.add_measurement(
            remaining_object_name,
            cellprofiler_core.measurement.FF_PARENT % object_name,
            parent_objects,
        )
        if np.max(original_objects.segmented) == 0:
            child_count = np.array([], int)
        else:
            child_count = fix(
                scind.sum(
                    labels,
                    original_objects.segmented,
                    np.arange(1, nobjects + 1, dtype=np.int32),
                ))
            child_count = (child_count > 0).astype(int)
        m.add_measurement(
            object_name,
            cellprofiler_core.measurement.FF_CHILDREN_COUNT %
            remaining_object_name,
            child_count,
        )
        if self.retain_or_renumber == R_RETAIN:
            remaining_object_count = nobjects
        else:
            remaining_object_count = len(unique_labels)
        I.add_object_count_measurements(m, remaining_object_name,
                                        remaining_object_count)
        I.add_object_location_measurements(m, remaining_object_name, labels)
        #
        # Save the input, mask and output images for display
        #
        if self.show_window:
            workspace.display_data.original_labels = original_objects.segmented
            workspace.display_data.final_labels = labels
            workspace.display_data.mask = mask
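
The R_RENUMBER branch uses a small lookup-table trick: build an indexer array spanning the old label range, write 1..N at the positions of the surviving labels, then push the whole label matrix through it. Isolated as a sketch:

import numpy as np

def renumber(labels):
    # Renumber surviving labels consecutively and report which old label
    # (parent object) each new object came from.
    unique_labels = np.unique(labels[labels != 0])
    indexer = np.zeros(labels.max() + 1, int)
    indexer[unique_labels] = np.arange(1, len(unique_labels) + 1)
    return indexer[labels], unique_labels

labels = np.array([[0, 3, 3],
                   [0, 0, 7]])
new_labels, parents = renumber(labels)
print(new_labels)    # [[0 1 1] [0 0 2]]
print(parents)       # [3 7] -> old label of new objects 1 and 2
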
Example #7
    def run(self, workspace):
        """Run the algorithm on one image set"""
        #
        # Get the image as a binary image
        #
        image_set = workspace.image_set
        image = image_set.get_image(self.image_name.value, must_be_binary=True)
        mask = image.pixel_data
        if image.has_mask:
            mask = mask & image.mask
        angle_count = self.angle_count.value
        #
        # We collect the i,j and angle of pairs of points that
        # are 3-d adjacent after erosion.
        #
        # i - the i coordinate of each point found after erosion
        # j - the j coordinate of each point found after erosion
        # a - the angle of the structuring element for each point found
        #
        i = np.zeros(0, int)
        j = np.zeros(0, int)
        a = np.zeros(0, int)

        ig, jg = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
        for angle_number in range(angle_count):
            angle = float(angle_number) * np.pi / float(angle_count)
            strel = self.get_diamond(angle)
            erosion = binary_erosion(mask, strel)
            #
            # Accumulate the count, i, j and angle for all foreground points
            # in the erosion
            #
            this_count = np.sum(erosion)
            i = np.hstack((i, ig[erosion]))
            j = np.hstack((j, jg[erosion]))
            a = np.hstack((a, np.ones(this_count, float) * angle))
        #
        # Find connections based on distances, not adjacency
        #
        first, second = self.find_adjacent_by_distance(i, j, a)
        #
        # Do all connected components.
        #
        if len(first) > 0:
            ij_labels = all_connected_components(first, second) + 1
            nlabels = np.max(ij_labels)
            label_indexes = np.arange(1, nlabels + 1)
            #
            # Compute the measurements
            #
            center_x = fix(mean_of_labels(j, ij_labels, label_indexes))
            center_y = fix(mean_of_labels(i, ij_labels, label_indexes))
            #
            # The angles are weirdly complicated because of the wrap-around.
            # You can imagine some horrible cases, like a circular patch of
            # "worm" in which all angles are represented or a gentle "U"
            # curve.
            #
            # For now, I'm going to use the following heuristic:
            #
            # Compute two different "angles". The angles of one go
            # from 0 to 180 and the angles of the other go from -90 to 90.
            # Take the variance of these from the mean and
            # choose the representation with the lowest variance.
            #
            # An alternative would be to compute the variance at each possible
            # dividing point. Another alternative would be to actually trace through
            # the connected components - both overkill for such an inconsequential
            # measurement I hope.
            #
            angles = fix(mean_of_labels(a, ij_labels, label_indexes))
            vangles = fix(
                mean_of_labels((a - angles[ij_labels - 1])**2, ij_labels,
                               label_indexes))
            aa = a.copy()
            aa[a > np.pi / 2] -= np.pi
            aangles = fix(mean_of_labels(aa, ij_labels, label_indexes))
            vaangles = fix(
                mean_of_labels((aa - aangles[ij_labels - 1])**2, ij_labels,
                               label_indexes))
            aangles[aangles < 0] += np.pi
            angles[vaangles < vangles] = aangles[vaangles < vangles]
            #
            # Squish the labels to 2-d. The labels for overlaps are arbitrary.
            #
            labels = np.zeros(mask.shape, int)
            labels[i, j] = ij_labels
        else:
            center_x = np.zeros(0, int)
            center_y = np.zeros(0, int)
            angles = np.zeros(0)
            nlabels = 0
            label_indexes = np.zeros(0, int)
            labels = np.zeros(mask.shape, int)

        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        object_name = self.object_name.value
        m.add_measurement(object_name,
                          cellprofiler_core.measurement.M_LOCATION_CENTER_X,
                          center_x)
        m.add_measurement(object_name,
                          cellprofiler_core.measurement.M_LOCATION_CENTER_Y,
                          center_y)
        m.add_measurement(object_name, M_ANGLE, angles * 180 / np.pi)
        m.add_measurement(object_name,
                          cellprofiler_core.measurement.M_NUMBER_OBJECT_NUMBER,
                          label_indexes)
        m.add_image_measurement(
            cellprofiler_core.measurement.FF_COUNT % object_name, nlabels)
        #
        # Make the objects
        #
        object_set = workspace.object_set
        assert isinstance(object_set, cpo.ObjectSet)
        objects = cpo.Objects()
        objects.segmented = labels
        objects.parent_image = image
        object_set.add_objects(objects, object_name)
        if self.show_window:
            workspace.display_data.i = center_y
            workspace.display_data.j = center_x
            workspace.display_data.angle = angles
            workspace.display_data.mask = mask
            workspace.display_data.labels = labels
            workspace.display_data.count = nlabels
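
The angle-averaging heuristic described in the comments (average the angles on [0, pi) and again shifted to (-pi/2, pi/2], then keep whichever representation has the smaller variance) can be shown on a single object. mean_angle below is a standalone sketch of that heuristic, not code lifted from the module:

import numpy as np

def mean_angle(a):
    # Representation 1: angles as given on [0, pi).
    mean0 = np.mean(a)
    var0 = np.mean((a - mean0) ** 2)
    # Representation 2: the same angles shifted onto (-pi/2, pi/2].
    aa = a.copy()
    aa[a > np.pi / 2] -= np.pi
    mean1 = np.mean(aa)
    var1 = np.mean((aa - mean1) ** 2)
    if mean1 < 0:
        mean1 += np.pi
    # Keep whichever representation wraps the data less.
    return mean1 if var1 < var0 else mean0

# Two directions just either side of the 0/pi wrap-around point:
print(mean_angle(np.array([0.05, np.pi - 0.05])))   # ~0.0, not the naive ~pi/2
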
    def run(self, workspace):
        """Run the module

        workspace    - The workspace contains
            pipeline     - instance of cpp for this run
            image_set    - the images in the image set being processed
            object_set   - the objects (labeled masks) in this image set
            measurements - the measurements for this run
            frame        - the parent frame to whatever frame is created. None means don't draw.
        """
        orig_objects_name = self.object_name.value
        filtered_objects_name = self.filtered_objects.value

        orig_objects = workspace.object_set.get_objects(orig_objects_name)
        assert isinstance(orig_objects, cpo.Objects)
        orig_labels = [l for l, c in orig_objects.get_labels()]

        if self.wants_image_display:
            guide_image = workspace.image_set.get_image(self.image_name.value)
            guide_image = guide_image.pixel_data
            if np.any(guide_image != np.min(guide_image)):
                guide_image = (guide_image - np.min(guide_image)) / (
                    np.max(guide_image) - np.min(guide_image))
        else:
            guide_image = None
        filtered_labels = workspace.interaction_request(
            self, orig_labels, guide_image,
            workspace.measurements.image_set_number)
        if filtered_labels is None:
            # Ask whoever is listening to stop doing stuff
            workspace.cancel_request()
            # Have to soldier on until the cancel takes effect...
            filtered_labels = orig_labels
        #
        # Renumber objects consecutively if asked to do so
        #
        unique_labels = np.unique(np.array(filtered_labels))
        unique_labels = unique_labels[unique_labels != 0]
        object_count = len(unique_labels)
        if self.renumber_choice == R_RENUMBER:
            mapping = np.zeros(
                1 if len(unique_labels) == 0 else np.max(unique_labels) + 1,
                int)
            mapping[unique_labels] = np.arange(1, object_count + 1)
            filtered_labels = [mapping[l] for l in filtered_labels]
        #
        # Make the objects out of the labels
        #
        filtered_objects = cpo.Objects()
        i, j = np.mgrid[0:filtered_labels[0].shape[0],
                        0:filtered_labels[0].shape[1]]
        ijv = np.zeros((0, 3), filtered_labels[0].dtype)
        for l in filtered_labels:
            ijv = np.vstack(
                (ijv, np.column_stack((i[l != 0], j[l != 0], l[l != 0]))))
        filtered_objects.set_ijv(ijv, orig_labels[0].shape)
        if orig_objects.has_unedited_segmented:
            filtered_objects.unedited_segmented = orig_objects.unedited_segmented
        if orig_objects.parent_image is not None:
            filtered_objects.parent_image = orig_objects.parent_image
        workspace.object_set.add_objects(filtered_objects,
                                         filtered_objects_name)
        #
        # Add parent/child & other measurements
        #
        m = workspace.measurements
        child_count, parents = orig_objects.relate_children(filtered_objects)
        m.add_measurement(
            filtered_objects_name,
            cellprofiler_core.measurement.FF_PARENT % orig_objects_name,
            parents,
        )
        m.add_measurement(
            orig_objects_name,
            cellprofiler_core.measurement.FF_CHILDREN_COUNT %
            filtered_objects_name,
            child_count,
        )
        #
        # The object count
        #
        I.add_object_count_measurements(m, filtered_objects_name, object_count)
        #
        # The object locations
        #
        I.add_object_location_measurements_ijv(m, filtered_objects_name, ijv)

        workspace.display_data.orig_ijv = orig_objects.ijv
        workspace.display_data.filtered_ijv = filtered_objects.ijv
        workspace.display_data.shape = orig_labels[0].shape
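
The ijv construction above flattens a list of (possibly overlapping) label planes into one (row, column, label) triple per labeled pixel, which is how overlapping objects are stored. The same loop, isolated and run on a two-plane example that overlaps at one pixel:

import numpy as np

def labels_to_ijv(label_planes):
    # One (i, j, label) row per labeled pixel, concatenated across planes,
    # so a pixel covered by two objects simply appears twice.
    shape = label_planes[0].shape
    i, j = np.mgrid[0:shape[0], 0:shape[1]]
    ijv = np.zeros((0, 3), label_planes[0].dtype)
    for l in label_planes:
        ijv = np.vstack(
            (ijv, np.column_stack((i[l != 0], j[l != 0], l[l != 0]))))
    return ijv

plane_a = np.array([[1, 1], [0, 0]])
plane_b = np.array([[0, 2], [0, 2]])         # overlaps plane_a at (0, 1)
print(labels_to_ijv([plane_a, plane_b]))
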
    def run(self, workspace):
        '''Run the algorithm on one image set'''
        #
        # Get the image as a binary image
        #
        image_set = workspace.image_set
        image = image_set.get_image(self.image_name.value,
                                    must_be_binary=True)
        mask = image.pixel_data
        if image.has_mask:
            mask = mask & image.mask
        angle_count = self.angle_count.value
        #
        # We collect the i,j and angle of pairs of points that
        # are 3-d adjacent after erosion.
        #
        # i - the i coordinate of each point found after erosion
        # j - the j coordinate of each point found after erosion
        # a - the angle of the structuring element for each point found
        #
        i = np.zeros(0, int)
        j = np.zeros(0, int)
        a = np.zeros(0, int)

        ig, jg = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
        for angle_number in range(angle_count):
            angle = float(angle_number) * np.pi / float(angle_count)
            strel = self.get_diamond(angle)
            erosion = binary_erosion(mask, strel)
            #
            # Accumulate the count, i, j and angle for all foreground points
            # in the erosion
            #
            this_count = np.sum(erosion)
            i = np.hstack((i, ig[erosion]))
            j = np.hstack((j, jg[erosion]))
            a = np.hstack((a, np.ones(this_count, float) * angle))
        #
        # Find connections based on distances, not adjacency
        #
        first, second = self.find_adjacent_by_distance(i, j, a)
        #
        # Do all connected components.
        #
        if len(first) > 0:
            ij_labels = all_connected_components(first, second) + 1
            nlabels = np.max(ij_labels)
            label_indexes = np.arange(1, nlabels + 1)
            #
            # Compute the measurements
            #
            center_x = fix(mean_of_labels(j, ij_labels, label_indexes))
            center_y = fix(mean_of_labels(i, ij_labels, label_indexes))
            #
            # The angles are weirdly complicated because of the wrap-around.
            # You can imagine some horrible cases, like a circular patch of
            # "linear object" in which all angles are represented or a gentle "U"
            # curve.
            #
            # For now, I'm going to use the following heuristic:
            #
            # Compute two different "angles". The angles of one go
            # from 0 to 180 and the angles of the other go from -90 to 90.
            # Take the variance of these from the mean and
            # choose the representation with the lowest variance.
            #
            # An alternative would be to compute the variance at each possible
            # dividing point. Another alternative would be to actually trace through
            # the connected components - both overkill for such an inconsequential
            # measurement I hope.
            #
            angles = fix(mean_of_labels(a, ij_labels, label_indexes))
            vangles = fix(mean_of_labels((a - angles[ij_labels-1])**2,
                                         ij_labels, label_indexes))
            aa = a.copy()
            aa[a > np.pi / 2] -= np.pi
            aangles = fix(mean_of_labels(aa, ij_labels, label_indexes))
            vaangles = fix(mean_of_labels((aa-aangles[ij_labels-1])**2,
                                          ij_labels, label_indexes))
            aangles[aangles < 0] += np.pi
            angles[vaangles < vangles] = aangles[vaangles < vangles]
        else:
            center_x = np.zeros(0, int)
            center_y = np.zeros(0, int)
            angles = np.zeros(0)
            nlabels = 0
            label_indexes = np.zeros(0, int)
            labels = np.zeros(mask.shape, int)

        ifull = []
        jfull = []
        ij_labelsfull = []
        labeldict = {}

        for label_id in np.unique(label_indexes):
            r = np.array(i) * (ij_labels == label_id)
            r = r[r != 0]
            c = np.array(j) * (ij_labels == label_id)
            c = c[c != 0]
            rect_strel = self.get_rectangle(angles[label_id - 1])
            seedmask = np.zeros_like(mask, int)
            seedmask[r, c] = label_id

            reconstructedlinearobject = skimage.filters.rank.maximum(
                seedmask, rect_strel)
            reconstructedlinearobject = reconstructedlinearobject * mask
            itemp, jtemp = np.where(reconstructedlinearobject == label_id)
            if not self.overlap_within_angle:
                ifull += list(itemp)
                jfull += list(jtemp)
                ij_labelsfull += [label_id] * len(itemp)
            else:
                # Materialize the pairs: a bare zip iterator has no len() and
                # would be exhausted after its first traversal below.
                labeldict[label_id] = list(zip(itemp, jtemp))

        if self.overlap_within_angle:
            angledict = {}
            for eachangle in range(len(angles)):
                angledict[eachangle + 1] = [angles[eachangle]]
            nmerges = 1
            while nmerges != 0:
                nmerges = sum(
                    self.mergeduplicates(firstlabel, secondlabel,
                                         labeldict, angledict)
                    for firstlabel in label_indexes
                    for secondlabel in label_indexes
                    if firstlabel != secondlabel)

            newlabels = sorted(labeldict.keys())
            newangles = sorted(angledict.keys())
            angles = []
            for eachnewlabel in range(len(newlabels)):
                ifull += [int(eachloc[0])
                          for eachloc in labeldict[newlabels[eachnewlabel]]]
                jfull += [int(eachloc[1])
                          for eachloc in labeldict[newlabels[eachnewlabel]]]
                ij_labelsfull += ([eachnewlabel + 1] *
                                  len(labeldict[newlabels[eachnewlabel]]))
                angles.append(np.mean(angledict[newlabels[eachnewlabel]]))
            angles = np.array(angles)

        ijv = np.zeros([len(ifull), 3], dtype=int)
        ijv[:, 0] = ifull
        ijv[:, 1] = jfull
        ijv[:, 2] = ij_labelsfull


        #
        # Make the objects
        #
        object_set = workspace.object_set
        object_name = self.object_name.value
        assert isinstance(object_set, cpo.ObjectSet)
        objects = cpo.Objects()
        objects.ijv = ijv
        objects.parent_image = image
        object_set.add_objects(objects, object_name)
        if self.show_window:
            workspace.display_data.mask = mask
            workspace.display_data.overlapping_labels = [
                l for l, idx in objects.get_labels()]
        if self.overlap_within_angle:
            center_x = np.bincount(ijv[:, 2], ijv[:, 1])[objects.indices] / objects.areas
            center_y = np.bincount(ijv[:, 2], ijv[:, 0])[objects.indices] / objects.areas

        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        m.add_measurement(object_name, "Location_Center_X", center_x)
        m.add_measurement(object_name, "Location_Center_Y", center_y)
        m.add_measurement(object_name, M_ANGLE, angles * 180 / np.pi)
        m.add_measurement(object_name, "Number_Object_Number", label_indexes)
        m.add_image_measurement("Count_%s" % object_name, nlabels)