Example #1
    def run(self, workspace):
        image_name = self.image_name.value
        objects_name = self.objects_name.value
        image = workspace.image_set.get_image(image_name)
        pixel_data = image.pixel_data

        labels = workspace.interaction_request(
            self, pixel_data, workspace.measurements.image_set_number)
        if labels is None:
            # User cancelled. Soldier on as best we can.
            workspace.cancel_request()
            labels = numpy.zeros(pixel_data.shape[:2], int)
        objects = Objects()
        objects.segmented = labels
        workspace.object_set.add_objects(objects, objects_name)

        ##################
        #
        # Add measurements
        #
        m = workspace.measurements
        #
        # The object count
        #
        object_count = numpy.max(labels)
        add_object_count_measurements(m, objects_name, object_count)
        #
        # The object locations
        #
        add_object_location_measurements(m, objects_name, labels)

        workspace.display_data.labels = labels
        workspace.display_data.pixel_data = pixel_data
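A note on the pattern above: numpy.max(labels) equals the number of objects only when the labels are numbered consecutively from 1. A minimal standalone sketch of the difference (plain NumPy, not CellProfiler code):

import numpy

labels = numpy.array([[0, 1, 0],
                      [0, 0, 3]])  # labels 1 and 3; label 2 is missing
print(numpy.max(labels))              # 3 -- the highest label value
print(len(numpy.unique(labels)) - 1)  # 2 -- the actual object count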
Example #2
    def run(self, workspace):
        for object_name in (self.objects_x.value, self.objects_y.value):
            if object_name not in workspace.object_set.object_names:
                raise ValueError(
                    "The %s objects are missing from the pipeline." %
                    object_name)
        objects_x = workspace.object_set.get_objects(self.objects_x.value)
        objects_y = workspace.object_set.get_objects(self.objects_y.value)

        assert objects_x.shape == objects_y.shape, \
            "Object sets must have the same dimensions"

        labels_x = objects_x.segmented.copy()
        labels_y = objects_y.segmented.copy()

        output = self.combine_arrays(labels_x, labels_y)
        output_labels = skimage.morphology.label(output)
        output_objects = Objects()
        output_objects.segmented = output_labels

        workspace.object_set.add_objects(output_objects,
                                         self.output_object.value)

        if self.show_window:
            workspace.display_data.input_object_x_name = self.objects_x.value
            workspace.display_data.input_object_x = objects_x.segmented
            workspace.display_data.input_object_y_name = self.objects_y.value
            workspace.display_data.input_object_y = objects_y.segmented
            workspace.display_data.output_object_name = self.output_object.value
            workspace.display_data.output_object = output_objects.segmented
Example #3
    def run(self, workspace):
        x_name = self.x_name.value
        y_name = self.y_name.value
        objects = workspace.object_set
        x = objects.get_objects(x_name)
        dimensions = x.dimensions
        x_data = x.segmented

        if self.method.value == "Dimensions":
            y_data = resize(x_data, (self.height.value, self.width.value))
        elif self.method.value == "Match Image":
            target_image = workspace.image_set.get_image(
                self.specific_image.value)
            if target_image.volumetric:
                tgt_height, tgt_width = target_image.pixel_data.shape[1:3]
            else:
                tgt_height, tgt_width = target_image.pixel_data.shape[:2]
            size = (tgt_height, tgt_width)
            y_data = resize(x_data, size)
        else:
            y_data = rescale(x_data, self.factor.value)

        y = Objects()
        y.segmented = y_data
        y.parent_image = x.parent_image
        objects.add_objects(y, y_name)
        self.add_measurements(workspace)

        if self.show_window:
            workspace.display_data.x_data = x_data
            workspace.display_data.y_data = y_data
            workspace.display_data.dimensions = dimensions
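The resize helper used above is not shown here; for label images the interpolation must be nearest-neighbour, or resizing invents fractional labels on object borders. A minimal sketch of that behaviour with skimage.transform.resize, assuming the helper works along these lines:

import numpy
import skimage.transform

labels = numpy.array([[1, 1, 0, 0],
                      [0, 0, 2, 2]])
# order=0 (nearest-neighbour) keeps label values intact when resizing
resized = skimage.transform.resize(
    labels, (4, 8), order=0, mode="edge",
    anti_aliasing=False, preserve_range=True).astype(labels.dtype)
print(resized)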
Example #4
    def run(self, workspace):
        x_name = self.x_name.value
        y_name = self.y_name.value
        images = workspace.image_set
        x = images.get_image(x_name)
        dimensions = x.dimensions
        x_data = x.pixel_data

        args = (setting.value for setting in self.settings()[2:])
        y_data = self.function(x_data, *args)

        y = Objects()
        y.segmented = y_data
        y.parent_image = x.parent_image

        objects = workspace.object_set
        objects.add_objects(y, y_name)
        self.add_measurements(workspace)

        if self.show_window:
            workspace.display_data.x_data = x_data
            workspace.display_data.y_data = y_data
            workspace.display_data.dimensions = dimensions
Example #5
    def run(self, workspace):
        """Find the outlines on the current image set

        workspace    - The workspace contains
            pipeline     - instance of cpp for this run
            image_set    - the images in the image set being processed
            object_set   - the objects (labeled masks) in this image set
            measurements - the measurements for this run
            frame        - the parent frame to whatever frame is created. None means don't draw.
        """
        gridding = workspace.get_grid(self.grid_name.value)
        if self.shape_choice == SHAPE_RECTANGLE:
            labels = self.run_rectangle(workspace, gridding)
        elif self.shape_choice == SHAPE_CIRCLE_FORCED:
            labels = self.run_forced_circle(workspace, gridding)
        elif self.shape_choice == SHAPE_CIRCLE_NATURAL:
            labels = self.run_natural_circle(workspace, gridding)
        elif self.shape_choice == SHAPE_NATURAL:
            labels = self.run_natural(workspace, gridding)
        objects = Objects()
        objects.segmented = labels
        object_count = gridding.rows * gridding.columns
        workspace.object_set.add_objects(objects,
                                         self.output_objects_name.value)
        add_object_location_measurements(workspace.measurements,
                                         self.output_objects_name.value,
                                         labels, object_count)
        add_object_count_measurements(workspace.measurements,
                                      self.output_objects_name.value,
                                      object_count)
        if self.show_window:
            workspace.display_data.gridding = gridding
            workspace.display_data.labels = labels
Example #6
    def test_segmented(self):
        segmented = numpy.ones((224, 224), bool)
        objects = Objects()
        objects.segmented = segmented
        numpy.testing.assert_array_equal(objects.segmented, segmented)
Example #7
    def test_dimensions(self):
        x = numpy.zeros((100, 224, 224, 3), numpy.float32)
        parent_image = Image(x, dimensions=3)
        objects = Objects()
        objects.parent_image = parent_image
        assert objects.dimensions == 3
Example #8
    def test_masked(self):
        x = numpy.zeros((224, 224, 3), numpy.float32)
        mask = numpy.ones((224, 224), bool)
        parent_image = Image(x, mask=mask)
        objects = Objects()
        objects.segmented = mask
        objects.parent_image = parent_image
        numpy.testing.assert_array_equal(objects.masked, mask)
Example #9
    def run(self, workspace):
        for object_name in (self.objects_x.value, self.objects_y.value):
            if object_name not in workspace.object_set.object_names:
                raise ValueError(
                    "The %s objects are missing from the pipeline." %
                    object_name)
        objects_x = workspace.object_set.get_objects(self.objects_x.value)
        objects_y = workspace.object_set.get_objects(self.objects_y.value)

        dimensions = objects_x.dimensions

        assert objects_x.shape == objects_y.shape, \
            "Object sets must have the same dimensions"

        labels_x = objects_x.segmented.copy().astype("uint16")
        labels_y = objects_y.segmented.copy().astype("uint16")

        output = combineobjects(self.merge_method.value, labels_x, labels_y,
                                dimensions)
        output_labels = skimage.morphology.label(output)
        output_objects = Objects()
        output_objects.segmented = output_labels

        workspace.object_set.add_objects(output_objects,
                                         self.output_object.value)

        m = workspace.measurements
        object_count = numpy.max(output_labels)
        add_object_count_measurements(m, self.output_object.value,
                                      object_count)
        add_object_location_measurements(m, self.output_object.value,
                                         output_labels)

        if self.show_window:
            workspace.display_data.input_object_x_name = self.objects_x.value
            workspace.display_data.input_object_x = objects_x.segmented
            workspace.display_data.input_object_y_name = self.objects_y.value
            workspace.display_data.input_object_y = objects_y.segmented
            workspace.display_data.output_object_name = self.output_object.value
            workspace.display_data.output_object = output_objects.segmented
            workspace.display_data.dimensions = dimensions
Example #10
    def run(self, workspace):
        x_name = self.x_name.value
        y_name = self.y_name.value
        objects = workspace.object_set
        x = objects.get_objects(x_name)
        dimensions = x.dimensions
        x_data = x.segmented

        if self.method.value == "Dimensions":
            if x_data.ndim == 3:
                size = (self.planes.value, self.height.value, self.width.value)
            else:
                size = (self.height.value, self.width.value)
            y_data = resize(x_data, size)
        elif self.method.value == "Match Image":
            target_image = workspace.image_set.get_image(
                self.specific_image.value)
            if target_image.volumetric:
                size = target_image.pixel_data.shape[:3]
            else:
                size = target_image.pixel_data.shape[:2]
            y_data = resize(x_data, size)
        else:
            if x_data.ndim == 3:
                size = (self.factor_z.value, self.factor_y.value,
                        self.factor_x.value)
            else:
                size = (self.factor_y.value, self.factor_x.value)
            y_data = rescale(x_data, size)
        y = Objects()
        y.segmented = y_data
        objects.add_objects(y, y_name)
        self.add_measurements(workspace)

        if self.show_window:
            workspace.display_data.x_data = x_data
            workspace.display_data.y_data = y_data
            workspace.display_data.dimensions = dimensions
Example #11
    def run(self, workspace):
        x_name = self.x_name.value
        y_name = self.y_name.value
        objects = workspace.object_set
        x = objects.get_objects(x_name)
        dimensions = x.dimensions
        x_data = x.segmented

        props = skimage.measure.regionprops(x_data)
        y_data = numpy.zeros_like(x_data)
        for region in props:
            label = region.label
            binary = x_data == label
            eroded = cellprofiler.utilities.morphology.binary_erosion(
                binary, self.structuring_element.value
            )
            y_data[eroded] = label
            if self.preserve_midpoints.value:
                if label not in y_data:
                    midpoint = scipy.ndimage.morphology.distance_transform_edt(binary)
                    y_data[midpoint == numpy.max(midpoint)] = label

        if self.relabel_objects.value:
            y_data = skimage.morphology.label(y_data)

        y = Objects()
        y.segmented = y_data
        y.parent_image = x.parent_image
        objects.add_objects(y, y_name)
        self.add_measurements(workspace)

        if self.show_window:
            workspace.display_data.x_data = x_data
            workspace.display_data.y_data = y_data
            workspace.display_data.dimensions = dimensions
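The preserve_midpoints branch above keeps a label alive when erosion would remove it entirely: the pixel(s) farthest from the object border are restored. A minimal sketch of that idea with SciPy:

import numpy
import scipy.ndimage

binary = numpy.zeros((5, 5), bool)
binary[1:4, 1:4] = True
# The distance transform peaks at the most interior pixel(s);
# restoring those pixels keeps the object from vanishing.
dist = scipy.ndimage.distance_transform_edt(binary)
print(numpy.argwhere(dist == dist.max()))  # [[2 2]]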
Example #12
    def run(self, workspace):
        """Run the algorithm on one image set"""
        #
        # Get the image as a binary image
        #
        image_set = workspace.image_set
        image = image_set.get_image(self.image_name.value, must_be_binary=True)
        mask = image.pixel_data
        if image.has_mask:
            mask = mask & image.mask
        angle_count = self.angle_count.value
        #
        # We collect the i,j and angle of pairs of points that
        # are 3-d adjacent after erosion.
        #
        # i - the i coordinate of each point found after erosion
        # j - the j coordinate of each point found after erosion
        # a - the angle of the structuring element for each point found
        #
        i = numpy.zeros(0, int)
        j = numpy.zeros(0, int)
        a = numpy.zeros(0, float)

        ig, jg = numpy.mgrid[0 : mask.shape[0], 0 : mask.shape[1]]
        this_idx = 0
        for angle_number in range(angle_count):
            angle = float(angle_number) * numpy.pi / float(angle_count)
            strel = self.get_diamond(angle)
            erosion = binary_erosion(mask, strel)
            #
            # Accumulate the count, i, j and angle for all foreground points
            # in the erosion
            #
            this_count = numpy.sum(erosion)
            i = numpy.hstack((i, ig[erosion]))
            j = numpy.hstack((j, jg[erosion]))
            a = numpy.hstack((a, numpy.ones(this_count, float) * angle))
        #
        # Find connections based on distances, not adjacency
        #
        first, second = self.find_adjacent_by_distance(i, j, a)
        #
        # Do all connected components.
        #
        if len(first) > 0:
            ij_labels = all_connected_components(first, second) + 1
            nlabels = numpy.max(ij_labels)
            label_indexes = numpy.arange(1, nlabels + 1)
            #
            # Compute the measurements
            #
            center_x = fixup_scipy_ndimage_result(
                mean_of_labels(j, ij_labels, label_indexes)
            )
            center_y = fixup_scipy_ndimage_result(
                mean_of_labels(i, ij_labels, label_indexes)
            )
            #
            # The angles are weirdly complicated because of the wrap-around.
            # You can imagine some horrible cases, like a circular patch of
            # "worm" in which all angles are represented or a gentle "U"
            # curve.
            #
            # For now, I'm going to use the following heuristic:
            #
            # Compute two different "angles". The angles of one go
            # from 0 to 180 and the angles of the other go from -90 to 90.
            # Take the variance of these from the mean and
            # choose the representation with the lowest variance.
            #
            # An alternative would be to compute the variance at each possible
            # dividing point. Another alternative would be to actually trace through
            # the connected components - both overkill for such an inconsequential
            # measurement I hope.
            #
            angles = fixup_scipy_ndimage_result(
                mean_of_labels(a, ij_labels, label_indexes)
            )
            vangles = fixup_scipy_ndimage_result(
                mean_of_labels(
                    (a - angles[ij_labels - 1]) ** 2, ij_labels, label_indexes
                )
            )
            aa = a.copy()
            aa[a > numpy.pi / 2] -= numpy.pi
            aangles = fixup_scipy_ndimage_result(
                mean_of_labels(aa, ij_labels, label_indexes)
            )
            vaangles = fixup_scipy_ndimage_result(
                mean_of_labels(
                    (aa - aangles[ij_labels - 1]) ** 2, ij_labels, label_indexes
                )
            )
            aangles[aangles < 0] += numpy.pi
            angles[vaangles < vangles] = aangles[vaangles < vangles]
            #
            # Squish the labels to 2-d. The labels for overlaps are arbitrary.
            #
            labels = numpy.zeros(mask.shape, int)
            labels[i, j] = ij_labels
        else:
            center_x = numpy.zeros(0, int)
            center_y = numpy.zeros(0, int)
            angles = numpy.zeros(0)
            nlabels = 0
            label_indexes = numpy.zeros(0, int)
            labels = numpy.zeros(mask.shape, int)

        m = workspace.measurements
        assert isinstance(m, Measurements)
        object_name = self.object_name.value
        m.add_measurement(object_name, M_LOCATION_CENTER_X, center_x)
        m.add_measurement(object_name, M_LOCATION_CENTER_Y, center_y)
        m.add_measurement(object_name, M_ANGLE, angles * 180 / numpy.pi)
        m.add_measurement(
            object_name, M_NUMBER_OBJECT_NUMBER, label_indexes,
        )
        m.add_image_measurement(FF_COUNT % object_name, nlabels)
        #
        # Make the objects
        #
        object_set = workspace.object_set
        assert isinstance(object_set, ObjectSet)
        objects = Objects()
        objects.segmented = labels
        objects.parent_image = image
        object_set.add_objects(objects, object_name)
        if self.show_window:
            workspace.display_data.i = center_y
            workspace.display_data.j = center_x
            workspace.display_data.angle = angles
            workspace.display_data.mask = mask
            workspace.display_data.labels = labels
            workspace.display_data.count = nlabels
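The angle heuristic described in the long comment above can be tried in isolation: average the angles in two representations, [0, pi) and [-pi/2, pi/2), and keep the one with the smaller variance. A minimal sketch with plain NumPy and hypothetical values:

import numpy

a = numpy.radians([5.0, 175.0, 178.0, 2.0])  # clustered around the 0/pi seam
v1 = numpy.var(a)                             # representation 1: [0, pi)
aa = a.copy()
aa[a > numpy.pi / 2] -= numpy.pi              # representation 2: [-pi/2, pi/2)
v2 = numpy.var(aa)
mean = numpy.mean(aa) if v2 < v1 else numpy.mean(a)
if mean < 0:
    mean += numpy.pi
print(numpy.degrees(mean))  # ~0, not the misleading ~90 of representation 1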
Example #13
    def run(self, workspace):
        """Run the module on the current data set

        workspace - has the current image set, object set, measurements
                    and the parent frame for the application if the module
                    is allowed to display. If the module should not display,
                    workspace.frame is None.
        """
        #
        # The object set holds "objects". Each of these is a container
        # for holding up to three kinds of image labels.
        #
        object_set = workspace.object_set
        #
        # Get the primary objects (the centers to be removed).
        # Get the string value out of primary_object_name.
        #
        primary_objects = object_set.get_objects(
            self.primary_objects_name.value)
        #
        # Get the cleaned-up labels image
        #
        primary_labels = primary_objects.segmented
        #
        # Do the same with the secondary object
        secondary_objects = object_set.get_objects(
            self.secondary_objects_name.value)
        secondary_labels = secondary_objects.segmented
        #
        # If one of the two label images is smaller than the other, we
        # try to find the cropping mask and we apply that mask to the larger
        #
        try:
            if any(p_size < s_size for p_size, s_size in
                   zip(primary_labels.shape, secondary_labels.shape)):
                #
                # Look for a cropping mask associated with the primary_labels
                # and apply that mask to resize the secondary labels
                #
                secondary_labels = primary_objects.crop_image_similarly(
                    secondary_labels)
                tertiary_image = primary_objects.parent_image
            elif any(p_size > s_size for p_size, s_size in
                     zip(primary_labels.shape, secondary_labels.shape)):
                primary_labels = secondary_objects.crop_image_similarly(
                    primary_labels)
                tertiary_image = secondary_objects.parent_image
            elif secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
        except ValueError:
            # No suitable cropping - resize all to fit the secondary
            # labels which are the most critical.
            #
            primary_labels, _ = size_similarly(secondary_labels,
                                               primary_labels)
            if secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
                if tertiary_image is not None:
                    tertiary_image, _ = size_similarly(secondary_labels,
                                                       tertiary_image)
        # If size/shape differences were too extreme, raise an error.
        if primary_labels.shape != secondary_labels.shape:
            raise ValueError(
                "This module requires that the object sets have matching widths and matching heights.\n"
                "The %s and %s objects do not (%s vs %s).\n"
                "If they are paired correctly you may want to use the ResizeObjects module "
                "to make them the same size." % (
                    self.secondary_objects_name,
                    self.primary_objects_name,
                    secondary_labels.shape,
                    primary_labels.shape,
                ))

        #
        # Find the outlines of the primary image and use this to shrink the
        # primary image by one. This guarantees that there is something left
        # of the secondary image after subtraction
        #
        primary_outline = outline(primary_labels)
        tertiary_labels = secondary_labels.copy()
        if self.shrink_primary:
            primary_mask = numpy.logical_or(primary_labels == 0,
                                            primary_outline)
        else:
            primary_mask = primary_labels == 0
        tertiary_labels[~primary_mask] = 0
        #
        # Get the outlines of the tertiary image
        #
        tertiary_outlines = outline(tertiary_labels) != 0
        #
        # Make the tertiary objects container
        #
        tertiary_objects = Objects()
        tertiary_objects.segmented = tertiary_labels
        tertiary_objects.parent_image = tertiary_image
        #
        # Relate tertiary objects to their parents & record
        #
        child_count_of_secondary, secondary_parents = secondary_objects.relate_children(
            tertiary_objects)
        if self.shrink_primary:
            child_count_of_primary, primary_parents = primary_objects.relate_children(
                tertiary_objects)
        else:
            # Primary and tertiary don't overlap.
            # Establish overlap between primary and secondary and commute
            _, secondary_of_primary = secondary_objects.relate_children(
                primary_objects)
            mask = secondary_of_primary != 0
            child_count_of_primary = numpy.zeros(mask.shape, int)
            child_count_of_primary[mask] = child_count_of_secondary[
                secondary_of_primary[mask] - 1]
            primary_parents = numpy.zeros(secondary_parents.shape,
                                          secondary_parents.dtype)
            primary_of_secondary = numpy.zeros(secondary_objects.count + 1,
                                               int)
            primary_of_secondary[secondary_of_primary] = numpy.arange(
                1,
                len(secondary_of_primary) + 1)
            primary_of_secondary[0] = 0
            primary_parents = primary_of_secondary[secondary_parents]
        #
        # Write out the objects
        #
        workspace.object_set.add_objects(tertiary_objects,
                                         self.subregion_objects_name.value)
        #
        # Write out the measurements
        #
        m = workspace.measurements
        #
        # The parent/child associations
        #
        for parent_objects_name, parents_of, child_count, relationship in (
            (
                self.primary_objects_name,
                primary_parents,
                child_count_of_primary,
                R_REMOVED,
            ),
            (
                self.secondary_objects_name,
                secondary_parents,
                child_count_of_secondary,
                R_PARENT,
            ),
        ):
            m.add_measurement(
                self.subregion_objects_name.value,
                FF_PARENT % parent_objects_name.value,
                parents_of,
            )
            m.add_measurement(
                parent_objects_name.value,
                FF_CHILDREN_COUNT % self.subregion_objects_name.value,
                child_count,
            )
            mask = parents_of != 0
            image_number = numpy.ones(numpy.sum(mask),
                                      int) * m.image_set_number
            child_object_number = numpy.argwhere(mask).flatten() + 1
            parent_object_number = parents_of[mask]
            m.add_relate_measurement(
                self.module_num,
                relationship,
                parent_objects_name.value,
                self.subregion_objects_name.value,
                image_number,
                parent_object_number,
                image_number,
                child_object_number,
            )

        object_count = tertiary_objects.count
        #
        # The object count
        #
        add_object_count_measurements(workspace.measurements,
                                      self.subregion_objects_name.value,
                                      object_count)
        #
        # The object locations
        #
        add_object_location_measurements(workspace.measurements,
                                         self.subregion_objects_name.value,
                                         tertiary_labels)

        if self.show_window:
            workspace.display_data.primary_labels = primary_labels
            workspace.display_data.secondary_labels = secondary_labels
            workspace.display_data.tertiary_labels = tertiary_labels
            workspace.display_data.tertiary_outlines = tertiary_outlines
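The core of the module above is small: shrink the primary objects by one pixel, then subtract them from the secondary objects so each tertiary object is a non-empty ring. A minimal sketch of the same effect, using SciPy binary erosion in place of the outline mask:

import numpy
import scipy.ndimage

secondary = numpy.zeros((5, 5), int)
secondary[1:4, 1:4] = 1                      # the "cell"
primary = secondary.copy()                   # pretend the "nucleus" fills it
# Eroding the primary by one pixel leaves its outline in the tertiary ring,
# so the subtraction can never produce an empty object.
interior = scipy.ndimage.binary_erosion(primary > 0)
tertiary = secondary.copy()
tertiary[interior] = 0
print(tertiary)                              # a one-pixel ring of 1s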
Example #14
    def run(self, workspace):
        images = workspace.image_set
        x = images.get_image(self.x_name.value)
        dimensions = x.dimensions
        x_data = x.pixel_data

        # Validate some settings
        if self.model.value in (GREY_1, GREY_2) and x.multichannel:
            raise ValueError(
                "Color images are not supported by this model. Please provide greyscale images."
            )
        elif self.model.value == COLOR_1 and not x.multichannel:
            raise ValueError(
                "Greyscale images are not supported by this model. Please provide a color overlay."
            )

        if self.model.value != MODEL_CUSTOM:
            if x.volumetric:
                raise NotImplementedError(
                    "StarDist's inbuilt models do not currently support 3D images"
                )
            model = StarDist2D.from_pretrained(self.model.value)
        else:
            model_directory, model_name = os.path.split(
                self.model_directory.get_absolute_path())
            if x.volumetric:
                from stardist.models import StarDist3D
                model = StarDist3D(config=None,
                                   basedir=model_directory,
                                   name=model_name)
            else:
                model = StarDist2D(config=None,
                                   basedir=model_directory,
                                   name=model_name)

        tiles = None
        if self.tile_image.value:
            tiles = []
            if x.volumetric:
                tiles += [1]
            tiles += [self.n_tiles_x.value, self.n_tiles_y.value]
            # Handle colour channels
            tiles += [1] * max(0, x.pixel_data.ndim - len(tiles))
            print(x.pixel_data.shape, x.pixel_data.ndim, tiles)

        if not self.save_probabilities.value:
            # Probabilities aren't wanted, things are simple
            data = model.predict_instances(normalize(x.pixel_data),
                                           return_predict=False,
                                           n_tiles=tiles)
            y_data = data[0]
        else:
            data, probs = model.predict_instances(normalize(x.pixel_data),
                                                  return_predict=True,
                                                  sparse=False,
                                                  n_tiles=tiles)
            y_data = data[0]

            # Scores aren't at the same resolution as the input image.
            # We need to slightly resize to match the original image.
            size_corrected = resize(probs[0], y_data.shape)
            prob_image = Image(
                size_corrected,
                parent_image=x.parent_image,
                convert=False,
                dimensions=len(size_corrected.shape),
            )

            workspace.image_set.add(self.probabilities_name.value, prob_image)

            if self.show_window:
                workspace.display_data.probabilities = size_corrected

        y = Objects()
        y.segmented = y_data
        y.parent_image = x.parent_image
        objects = workspace.object_set
        objects.add_objects(y, self.y_name.value)

        self.add_measurements(workspace)

        if self.show_window:
            workspace.display_data.x_data = x_data
            workspace.display_data.y_data = y_data
            workspace.display_data.dimensions = dimensions
Example #15
    def run_on_objects(self, object_name, workspace):
        """Determine desired measurements and pass in object arrays for analysis"""
        objects = workspace.get_objects(object_name)

        # Don't analyze if there are no objects at all.
        if len(objects.indices) == 0:
            self.measurements_without_objects(workspace, object_name)
            return

        # Determine which properties we're measuring.
        if len(objects.shape) == 2:
            desired_properties = [
                "label",
                "image",
                "area",
                "perimeter",
                "bbox",
                "bbox_area",
                "major_axis_length",
                "minor_axis_length",
                "orientation",
                "centroid",
                "equivalent_diameter",
                "extent",
                "eccentricity",
                "solidity",
                "euler_number",
            ]
            if self.calculate_advanced.value:
                desired_properties += [
                    "inertia_tensor",
                    "inertia_tensor_eigvals",
                    "moments",
                    "moments_central",
                    "moments_hu",
                    "moments_normalized",
                ]
        else:
            desired_properties = [
                "label",
                "image",
                "area",
                "centroid",
                "bbox",
                "bbox_area",
                "major_axis_length",
                "minor_axis_length",
                "extent",
                "equivalent_diameter",
                "euler_number",
            ]
            if self.calculate_advanced.value:
                desired_properties += [
                    "solidity",
                ]

        # Check for overlapping object sets
        if not objects.overlapping():
            features_to_record = self.analyze_objects(objects,
                                                      desired_properties)
        else:
            # Objects are overlapping, process as single arrays
            coords_array = objects.ijv
            features_to_record = {}
            for label in objects.indices:
                omap = numpy.zeros(objects.shape)
                ocoords = coords_array[coords_array[:, 2] == label, 0:2]
                numpy.put(omap, numpy.ravel_multi_index(ocoords.T, omap.shape),
                          1)
                tempobject = Objects()
                tempobject.segmented = omap
                buffer = self.analyze_objects(tempobject, desired_properties)
                for f, m in buffer.items():
                    if f in features_to_record:
                        features_to_record[f] = numpy.concatenate(
                            (features_to_record[f], m))
                    else:
                        features_to_record[f] = m
        for f, m in features_to_record.items():
            self.record_measurement(workspace, object_name, f, m)
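The overlapping-objects branch above rebuilds one binary map per label from the ijv coordinate array, because a single 2-D label matrix cannot store two objects on the same pixel. A minimal sketch of that reconstruction:

import numpy

# ijv rows are (i, j, label); pixel (0, 1) belongs to objects 1 AND 2
ijv = numpy.array([[0, 0, 1],
                   [0, 1, 1],
                   [0, 1, 2],
                   [1, 1, 2]])
shape = (2, 2)
for label in numpy.unique(ijv[:, 2]):
    omap = numpy.zeros(shape)
    coords = ijv[ijv[:, 2] == label, 0:2]
    numpy.put(omap, numpy.ravel_multi_index(coords.T, shape), 1)
    print(label, omap.tolist())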
Example #16
    def run(self, workspace):
        if self.mode.value != MODE_CUSTOM:
            model_type = 'cyto' if self.mode.value == MODE_CELLS else 'nuclei'
            model = models.Cellpose(model_type=model_type,
                                    gpu=self.use_gpu.value)
        else:
            model_file = self.model_file_name.value
            model_directory = self.model_directory.get_absolute_path()
            model_path = os.path.join(model_directory, model_file)
            model = models.CellposeModel(pretrained_model=model_path,
                                         gpu=self.use_gpu.value)

        x_name = self.x_name.value
        y_name = self.y_name.value
        images = workspace.image_set
        x = images.get_image(x_name)
        dimensions = x.dimensions
        x_data = x.pixel_data

        if x.multichannel:
            raise ValueError(
                "Color images are not currently supported. Please provide greyscale images."
            )

        if self.mode.value != "Nuclei" and self.supply_nuclei.value:
            nuc_image = images.get_image(self.nuclei_image.value)
            # CellPose expects RGB, we'll have a blank red channel, cells in green and nuclei in blue.
            if x.volumetric:
                x_data = numpy.stack(
                    (numpy.zeros_like(x_data), x_data, nuc_image.pixel_data),
                    axis=1)
            else:
                x_data = numpy.stack(
                    (numpy.zeros_like(x_data), x_data, nuc_image.pixel_data),
                    axis=-1)
            channels = [2, 3]
        else:
            channels = [0, 0]

        diam = self.expected_diameter.value if self.expected_diameter.value > 0 else None

        try:
            y_data, flows, *_ = model.eval(
                x_data,
                channels=channels,
                diameter=diam,
                net_avg=self.use_averaging.value,
                do_3D=x.volumetric,
                flow_threshold=self.flow_threshold.value,
                cellprob_threshold=self.dist_threshold.value)
        finally:
            if self.use_gpu.value and model.torch:
                # Try to clear some GPU memory for other worker processes.
                try:
                    from torch import cuda
                    cuda.empty_cache()
                except Exception as e:
                    print(
                        f"Unable to clear GPU memory. You may need to restart CellProfiler to change models. {e}"
                    )

        y = Objects()
        y.segmented = y_data
        y.parent_image = x.parent_image
        objects = workspace.object_set
        objects.add_objects(y, y_name)

        if self.save_probabilities.value:
            # Flows come out sized relative to CellPose's inbuilt model size.
            # We need to slightly resize to match the original image.
            size_corrected = resize(flows[2], y_data.shape)
            prob_image = Image(
                size_corrected,
                parent_image=x.parent_image,
                convert=False,
                dimensions=len(size_corrected.shape),
            )

            workspace.image_set.add(self.probabilities_name.value, prob_image)

            if self.show_window:
                workspace.display_data.probabilities = size_corrected

        self.add_measurements(workspace)

        if self.show_window:
            if x.volumetric:
                # Can't show CellPose-accepted colour images in 3D
                workspace.display_data.x_data = x.pixel_data
            else:
                workspace.display_data.x_data = x_data
            workspace.display_data.y_data = y_data
            workspace.display_data.dimensions = dimensions
Example #17
    def run(self, workspace):
        """Run the module

        workspace    - The workspace contains
            pipeline     - instance of cpp for this run
            image_set    - the images in the image set being processed
            object_set   - the objects (labeled masks) in this image set
            measurements - the measurements for this run
            frame        - the parent frame to whatever frame is created. None means don't draw.
        """
        orig_objects_name = self.object_name.value
        filtered_objects_name = self.filtered_objects.value

        orig_objects = workspace.object_set.get_objects(orig_objects_name)
        assert isinstance(orig_objects, Objects)
        orig_labels = [l for l, c in orig_objects.get_labels()]

        if self.wants_image_display:
            guide_image = workspace.image_set.get_image(self.image_name.value)
            guide_image = guide_image.pixel_data
            if guide_image.dtype == bool:
                guide_image = guide_image.astype(int)
            if numpy.any(guide_image != numpy.min(guide_image)):
                guide_image = (guide_image - numpy.min(guide_image)) / (
                    numpy.max(guide_image) - numpy.min(guide_image))
        else:
            guide_image = None
        filtered_labels = workspace.interaction_request(
            self, orig_labels, guide_image,
            workspace.measurements.image_set_number)
        if filtered_labels is None:
            # Ask whoever is listening to stop doing stuff
            workspace.cancel_request()
            # Have to soldier on until the cancel takes effect...
            filtered_labels = orig_labels
        #
        # Renumber objects consecutively if asked to do so
        #
        unique_labels = numpy.unique(numpy.array(filtered_labels))
        unique_labels = unique_labels[unique_labels != 0]
        object_count = len(unique_labels)
        if self.renumber_choice == R_RENUMBER:
            mapping = numpy.zeros(
                1 if len(unique_labels) == 0 else numpy.max(unique_labels) + 1,
                int)
            mapping[unique_labels] = numpy.arange(1, object_count + 1)
            filtered_labels = [mapping[l] for l in filtered_labels]
        #
        # Make the objects out of the labels
        #
        filtered_objects = Objects()
        i, j = numpy.mgrid[0:filtered_labels[0].shape[0],
                           0:filtered_labels[0].shape[1]]
        ijv = numpy.zeros((0, 3), filtered_labels[0].dtype)
        for l in filtered_labels:
            ijv = numpy.vstack(
                (ijv, numpy.column_stack((i[l != 0], j[l != 0], l[l != 0]))))
        filtered_objects.set_ijv(ijv, orig_labels[0].shape)
        if orig_objects.has_unedited_segmented():
            filtered_objects.unedited_segmented = orig_objects.unedited_segmented
        if orig_objects.parent_image is not None:
            filtered_objects.parent_image = orig_objects.parent_image
        workspace.object_set.add_objects(filtered_objects,
                                         filtered_objects_name)
        #
        # Add parent/child & other measurements
        #
        m = workspace.measurements
        child_count, parents = orig_objects.relate_children(filtered_objects)
        m.add_measurement(
            filtered_objects_name,
            FF_PARENT % orig_objects_name,
            parents,
        )
        m.add_measurement(
            orig_objects_name,
            FF_CHILDREN_COUNT % filtered_objects_name,
            child_count,
        )
        #
        # The object count
        #
        add_object_count_measurements(m, filtered_objects_name, object_count)
        #
        # The object locations
        #
        add_object_location_measurements_ijv(m, filtered_objects_name, ijv)

        workspace.display_data.orig_ijv = orig_objects.ijv
        workspace.display_data.filtered_ijv = filtered_objects.ijv
        workspace.display_data.shape = orig_labels[0].shape
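set_ijv above stores labels as (i, j, value) rows, which is how CellProfiler represents possibly-overlapping objects. A minimal sketch of the dense-to-ijv conversion used in the loop, and the reverse for the non-overlapping case:

import numpy

labels = numpy.array([[0, 1, 1],
                      [2, 2, 0]])
i, j = numpy.mgrid[0:labels.shape[0], 0:labels.shape[1]]
fg = labels != 0
ijv = numpy.column_stack((i[fg], j[fg], labels[fg]))
print(ijv)  # one (i, j, label) row per foreground pixel

# The reverse is only unambiguous when objects don't overlap:
dense = numpy.zeros(labels.shape, labels.dtype)
dense[ijv[:, 0], ijv[:, 1]] = ijv[:, 2]
assert (dense == labels).all()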
Example #18
    def run(self, workspace):
        """Run the module on an image set"""

        object_name = self.object_name.value
        remaining_object_name = self.remaining_objects.value
        original_objects = workspace.object_set.get_objects(object_name)

        if self.mask_choice == MC_IMAGE:
            mask = workspace.image_set.get_image(
                self.masking_image.value, must_be_binary=True
            )
            mask = mask.pixel_data
        else:
            masking_objects = workspace.object_set.get_objects(
                self.masking_objects.value
            )
            mask = masking_objects.segmented > 0
        if self.wants_inverted_mask:
            mask = ~mask
        #
        # Load the labels
        #
        labels = original_objects.segmented.copy()
        nobjects = numpy.max(labels)
        #
        # Resize the mask to cover the objects
        #
        mask, m1 = size_similarly(labels, mask)
        mask[~m1] = False
        #
        # Apply the mask according to the overlap choice.
        #
        if nobjects == 0:
            pass
        elif self.overlap_choice == P_MASK:
            labels = labels * mask
        else:
            pixel_counts = fixup_scipy_ndimage_result(
                scipy.ndimage.sum(
                    mask, labels, numpy.arange(1, nobjects + 1, dtype=numpy.int32)
                )
            )
            if self.overlap_choice == P_KEEP:
                keep = pixel_counts > 0
            else:
                total_pixels = fixup_scipy_ndimage_result(
                    scipy.ndimage.sum(
                        numpy.ones(labels.shape),
                        labels,
                        numpy.arange(1, nobjects + 1, dtype=numpy.int32),
                    )
                )
                if self.overlap_choice == P_REMOVE:
                    keep = pixel_counts == total_pixels
                elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                    fraction = self.overlap_fraction.value
                    keep = pixel_counts / total_pixels >= fraction
                else:
                    raise NotImplementedError(
                        "Unknown overlap-handling choice: %s"
                        % self.overlap_choice.value
                    )
            keep = numpy.hstack(([False], keep))
            labels[~keep[labels]] = 0
        #
        # Renumber the labels matrix if requested
        #
        if self.retain_or_renumber == R_RENUMBER:
            unique_labels = numpy.unique(labels[labels != 0])
            indexer = numpy.zeros(nobjects + 1, int)
            indexer[unique_labels] = numpy.arange(1, len(unique_labels) + 1)
            labels = indexer[labels]
            parent_objects = unique_labels
        else:
            parent_objects = numpy.arange(1, nobjects + 1)
        #
        # Add the objects
        #
        remaining_objects = Objects()
        remaining_objects.segmented = labels
        remaining_objects.unedited_segmented = original_objects.unedited_segmented
        workspace.object_set.add_objects(remaining_objects, remaining_object_name)
        #
        # Add measurements
        #
        m = workspace.measurements
        m.add_measurement(
            remaining_object_name, FF_PARENT % object_name, parent_objects,
        )
        if numpy.max(original_objects.segmented) == 0:
            child_count = numpy.array([], int)
        else:
            child_count = fixup_scipy_ndimage_result(
                scipy.ndimage.sum(
                    labels,
                    original_objects.segmented,
                    numpy.arange(1, nobjects + 1, dtype=numpy.int32),
                )
            )
            child_count = (child_count > 0).astype(int)
        m.add_measurement(
            object_name, FF_CHILDREN_COUNT % remaining_object_name, child_count,
        )
        if self.retain_or_renumber == R_RETAIN:
            remaining_object_count = nobjects
        else:
            remaining_object_count = len(unique_labels)
        add_object_count_measurements(m, remaining_object_name, remaining_object_count)
        add_object_location_measurements(m, remaining_object_name, labels)
        #
        # Save the input, mask and output images for display
        #
        if self.show_window:
            workspace.display_data.original_labels = original_objects.segmented
            workspace.display_data.final_labels = labels
            workspace.display_data.mask = mask
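The renumbering branch above uses a small lookup-table idiom: build an indexer array over all possible label values, then fancy-index the label matrix through it. A minimal sketch:

import numpy

labels = numpy.array([[0, 3, 3],
                      [7, 0, 7]])          # labels 3 and 7 survive masking
unique_labels = numpy.unique(labels[labels != 0])
indexer = numpy.zeros(labels.max() + 1, int)
indexer[unique_labels] = numpy.arange(1, len(unique_labels) + 1)
print(indexer[labels])                     # [[0 1 1], [2 0 2]]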
Example #19
    def run(self, workspace):
        objects_name = self.objects_name.value
        objects = workspace.object_set.get_objects(objects_name)
        assert isinstance(objects, Objects)
        labels = objects.segmented
        if self.relabel_option == OPTION_SPLIT:
            output_labels, count = scipy.ndimage.label(
                labels > 0, numpy.ones((3, 3), bool))
        else:
            if self.merge_option == UNIFY_DISTANCE:
                mask = labels > 0
                if self.distance_threshold.value > 0:
                    #
                    # Take the distance transform of the reverse of the mask
                    # and figure out what points are less than 1/2 of the
                    # distance from an object.
                    #
                    d = scipy.ndimage.distance_transform_edt(~mask)
                    mask = d < self.distance_threshold.value / 2 + 1
                output_labels, count = scipy.ndimage.label(
                    mask, numpy.ones((3, 3), bool))
                output_labels[labels == 0] = 0
                if self.wants_image:
                    output_labels = self.filter_using_image(workspace, mask)
            elif self.merge_option == UNIFY_PARENT:
                parents_name = self.parent_object.value
                parents_of = workspace.measurements[objects_name, "_".join(
                    (C_PARENT, parents_name))]
                output_labels = labels.copy().astype(numpy.uint32)
                output_labels[labels > 0] = parents_of[labels[labels > 0] - 1]
                if self.merging_method == UM_CONVEX_HULL:
                    ch_pts, n_pts = centrosome.cpmorphology.convex_hull(
                        output_labels)
                    ijv = centrosome.cpmorphology.fill_convex_hulls(
                        ch_pts, n_pts)
                    output_labels[ijv[:, 0], ijv[:, 1]] = ijv[:, 2]

        output_objects = Objects()
        output_objects.segmented = output_labels
        if objects.has_small_removed_segmented:
            output_objects.small_removed_segmented = copy_labels(
                objects.small_removed_segmented, output_labels)
        if objects.has_unedited_segmented:
            output_objects.unedited_segmented = copy_labels(
                objects.unedited_segmented, output_labels)
        output_objects.parent_image = objects.parent_image
        workspace.object_set.add_objects(output_objects,
                                         self.output_objects_name.value)

        measurements = workspace.measurements
        add_object_count_measurements(
            measurements,
            self.output_objects_name.value,
            numpy.max(output_objects.segmented),
        )
        add_object_location_measurements(measurements,
                                         self.output_objects_name.value,
                                         output_objects.segmented)

        #
        # Relate the output objects to the input ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = objects.relate_children(
            output_objects)
        measurements.add_measurement(
            self.objects_name.value,
            FF_CHILDREN_COUNT % self.output_objects_name.value,
            children_per_parent,
        )
        measurements.add_measurement(
            self.output_objects_name.value,
            FF_PARENT % self.objects_name.value,
            parents_of_children,
        )

        if self.show_window:
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.output_labels = output_objects.segmented
            if self.merge_option == UNIFY_PARENT:
                workspace.display_data.parent_labels = workspace.object_set.get_objects(
                    self.parent_object.value).segmented
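The UNIFY_DISTANCE branch above merges objects whose borders are within the chosen distance by thresholding a distance transform, labelling the thickened mask, and then cutting it back to the original foreground. A minimal sketch:

import numpy
import scipy.ndimage

labels = numpy.zeros((1, 9), int)
labels[0, 1] = 1
labels[0, 7] = 2                  # two objects, 6 pixels apart
threshold = 6
d = scipy.ndimage.distance_transform_edt(labels == 0)
bridged = d < threshold / 2 + 1   # grow each object by half the threshold
merged, count = scipy.ndimage.label(bridged)
merged[labels == 0] = 0           # keep only the original foreground
print(merged)                     # both seeds now carry the same label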
Example #20
    def run(self, workspace):
        image_name = self.image_name.value
        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale=True)
        workspace.display_data.statistics = []
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.x_name.value)
        if img.shape != objects.shape:
            raise ValueError(
                "This module requires that the input image and object sets are the same size.\n"
                "The %s image and %s objects are not (%s vs %s).\n"
                "If they are paired correctly you may want to use the Resize, ResizeObjects or "
                "Crop module(s) to make them the same size." % (
                    image_name,
                    self.x_name.value,
                    img.shape,
                    objects.shape,
                ))
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        else:
            thresholded_image, global_threshold, sigma = self._threshold_image(
                image_name, workspace)
            workspace.display_data.global_threshold = global_threshold
            workspace.display_data.threshold_sigma = sigma
            has_threshold = True

        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = numpy.hstack(
            (labels_in[0, :], labels_in[-1, :],
             labels_in[:, 0], labels_in[:, -1]))
        labels_touching_edge = numpy.unique(labels_touching_edge)
        is_touching = numpy.zeros(numpy.max(labels_in) + 1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]

        labels_in[(~is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = numpy.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp

        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances, (i, j) = scipy.ndimage.distance_transform_edt(
                    labels_in == 0, return_indices=True)
                labels_out = numpy.zeros(labels_in.shape, int)
                dilate_mask = distances <= self.distance_to_dilate.value
                labels_out[dilate_mask] = labels_in[i[dilate_mask],
                                                    j[dilate_mask]]
            else:
                labels_out, distances = centrosome.propagate.propagate(
                    img, labels_in, thresholded_image, 1.0)
                labels_out[distances > self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in > 0]
            if self.fill_holes:
                label_mask = labels_out == 0
                small_removed_segmented_out = centrosome.cpmorphology.fill_labeled_holes(
                    labels_out, mask=label_mask)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            #
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = centrosome.propagate.propagate(
                img, labels_in, thresholded_image,
                self.regularization_factor.value)
            if self.fill_holes:
                label_mask = labels_out == 0
                small_removed_segmented_out = centrosome.cpmorphology.fill_labeled_holes(
                    labels_out, mask=label_mask)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = numpy.abs(scipy.ndimage.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = numpy.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = numpy.logical_and(watershed_mask, mask)

            #
            # Perform the first watershed
            #

            labels_out = skimage.segmentation.watershed(
                connectivity=numpy.ones((3, 3), bool),
                image=sobel_image,
                markers=labels_in,
                mask=watershed_mask,
            )

            if self.fill_holes:
                label_mask = labels_out == 0
                small_removed_segmented_out = centrosome.cpmorphology.fill_labeled_holes(
                    labels_out, mask=label_mask)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1 - img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = numpy.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = numpy.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #

            labels_out = skimage.segmentation.watershed(
                connectivity=numpy.ones((3, 3), bool),
                image=inverted_img,
                markers=labels_in,
                mask=watershed_mask,
            )

            if self.fill_holes:
                label_mask = labels_out == 0
                small_removed_segmented_out = centrosome.cpmorphology.fill_labeled_holes(
                    labels_out, mask=label_mask)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scipy.ndimage.maximum(
                segmented_out,
                objects.segmented,
                list(range(numpy.max(objects.segmented) + 1)),
            )
            lookup = centrosome.cpmorphology.fixup_scipy_ndimage_result(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = numpy.arange(numpy.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image

        #
        # Add the objects to the object set
        #
        objects_out = Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.y_name.value
        workspace.object_set.add_objects(objects_out, objname)
        object_count = numpy.max(segmented_out)
        #
        # Add measurements
        #
        measurements = workspace.measurements
        super(IdentifySecondaryObjects, self).add_measurements(workspace)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = objects.relate_children(
            objects_out)
        measurements.add_measurement(
            self.x_name.value,
            FF_CHILDREN_COUNT % objname,
            children_per_parent,
        )
        measurements.add_measurement(
            objname,
            FF_PARENT % self.x_name.value,
            parents_of_children,
        )
        image_numbers = (numpy.ones(len(parents_of_children), int) *
                         measurements.image_set_number)
        mask = parents_of_children > 0
        measurements.add_relate_measurement(
            self.module_num,
            R_PARENT,
            self.x_name.value,
            self.y_name.value,
            image_numbers[mask],
            parents_of_children[mask],
            image_numbers[mask],
            numpy.arange(1,
                         len(parents_of_children) + 1)[mask],
        )
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(
                new_objects, self.new_primary_objects_name.value)
            super(IdentifySecondaryObjects, self).add_measurements(
                workspace,
                input_object_name=self.x_name.value,
                output_object_name=self.new_primary_objects_name.value,
            )

            children_per_parent, parents_of_children = new_objects.relate_children(
                objects_out)

            measurements.add_measurement(
                self.new_primary_objects_name.value,
                FF_CHILDREN_COUNT % objname,
                children_per_parent,
            )

            measurements.add_measurement(
                objname,
                FF_PARENT % self.new_primary_objects_name.value,
                parents_of_children,
            )

        if self.show_window:
            object_area = numpy.sum(segmented_out > 0)
            workspace.display_data.object_pct = (
                100 * object_area / numpy.prod(segmented_out.shape))
            workspace.display_data.img = img
            workspace.display_data.segmented_out = segmented_out
            workspace.display_data.primary_labels = objects.segmented
            workspace.display_data.global_threshold = global_threshold
            workspace.display_data.object_count = object_count
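The M_DISTANCE_N branch above dilates seed labels by up to N pixels using the index output of the distance transform: each background pixel within range copies the label of its nearest seed. A minimal sketch:

import numpy
import scipy.ndimage

seeds = numpy.zeros((7, 7), int)
seeds[1, 1] = 1
seeds[5, 5] = 2
distances, (i, j) = scipy.ndimage.distance_transform_edt(
    seeds == 0, return_indices=True)
labels_out = numpy.zeros_like(seeds)
grow = distances <= 2                        # dilate by at most 2 pixels
labels_out[grow] = seeds[i[grow], j[grow]]   # copy the nearest seed's label
print(labels_out)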
Example #21
    def test_shape(self):
        objects = Objects()
        objects.segmented = numpy.ones((224, 224), numpy.uint8)
        assert objects.shape == (224, 224)