Example #1
    def create_settings(self):
        self.operand_choice = Choice(
            "Measure the area occupied by",
            [O_BINARY_IMAGE, O_OBJECTS, O_BOTH],
            doc="""\
Area occupied can be measured in the following ways:

-  *{O_BINARY_IMAGE}:* The area occupied by the foreground in a binary (black and white) image.
-  *{O_OBJECTS}:* The area occupied by previously-identified objects.
-  *{O_BOTH}:* Measure both of the above.
                    """.format(**{
                "O_BINARY_IMAGE": O_BINARY_IMAGE,
                "O_OBJECTS": O_OBJECTS,
                "O_BOTH": O_BOTH
            }),
        )

        self.divider = Divider()

        self.images_list = ImageListSubscriber(
            "Select binary images to measure",
            [],
            doc="""*(Used only if ‘{O_BINARY_IMAGE}’ is to be measured)*

These should be binary images created earlier in the pipeline, where you would
like to measure the area occupied by the foreground in the image.
                    """.format(**{"O_BINARY_IMAGE": O_BINARY_IMAGE}),
        )

        self.objects_list = LabelListSubscriber(
            "Select object sets to measure",
            [],
            doc="""*(Used only if ‘{O_OBJECTS}’ are to be measured)*

Select the previously identified objects you would like to measure.""".format(
                **{"O_OBJECTS": O_OBJECTS}),
        )
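The "(Used only if …)" notes in the docs above are typically enforced by the module's visible_settings method. A minimal sketch of how that toggle might look here; the body is an assumption, not the module's actual code:

    def visible_settings(self):
        # Always show the operand choice and the divider.
        visible = [self.operand_choice, self.divider]
        # Show the image list only when binary images are being measured.
        if self.operand_choice.value in (O_BINARY_IMAGE, O_BOTH):
            visible.append(self.images_list)
        # Show the object list only when objects are being measured.
        if self.operand_choice.value in (O_OBJECTS, O_BOTH):
            visible.append(self.objects_list)
        return visible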
Example #2
            def __init__(self, index, operation):
                self.__index = index
                self.__operation = operation
                self.__operand_choice = Choice(
                    self.operand_choice_text(),
                    MC_ALL,
                    doc="""Indicate whether the operand is an image or object measurement.""",
                )

                self.__operand_objects = LabelName(
                    self.operand_objects_text(),
                    "None",
                    doc="""Choose the objects you want to measure for this operation.""",
                )

                self.__operand_measurement = Measurement(
                    self.operand_measurement_text(),
                    self.object_fn,
                    doc="""\
Enter the category that was used to create the measurement. You
will be prompted to add additional information depending on
the type of measurement that is requested.""",
                )

                self.__multiplicand = Float(
                    "Multiply the above operand by",
                    1,
                    doc="""Enter the number by which you would like to multiply the above operand.""",
                )

                self.__exponent = Float(
                    "Raise the power of above operand by",
                    1,
                    doc="""Enter the power by which you would like to raise the above operand.""",
                )
Example #3
    def create_settings(self):
        super(FindMaxima, self).create_settings()

        self.min_distance = Integer(
            text="Minimum distance between maxima",
            value=5,
            minval=0,
            doc="""Choose the minimum distance between accepted local maxima"""
        )

        self.exclude_mode = Choice("Method for excluding background",
                                   [MODE_THRESHOLD, MODE_MASK, MODE_OBJECTS],
                                   value="Threshold",
                                   doc=f"""\
By default, local maxima will be searched for across the whole image. This means that maxima will be found in 
areas that consist entirely of background. To resolve this we have several methods to exclude background.

**{MODE_THRESHOLD}** allows you to specify a minimum pixel intensity to be considered as a peak. Setting this to 0
effectively uses no threshold.

**{MODE_MASK}** will restrict peaks to areas which are within a provided mask image. This mask will typically come from 
the threshold module or another means of finding background.

**{MODE_OBJECTS}** will restrict peaks to areas within an existing set of objects.
""")

        self.min_intensity = Float("Specify the minimum intensity of a peak",
                                   0,
                                   minval=0,
                                   maxval=99,
                                   doc="""\
Intensity peaks below this threshold value will be excluded. Use this to ensure that your local 
maxima are within objects of interest.""")

        self.mask_image = ImageSubscriber(
            "Select the image to use as a mask",
            doc=
            "Select the image you want to use. This should be a binary image.")

        self.mask_objects = LabelSubscriber(
            "Select the objects to search within",
            doc="Select the objects within which to search for peaks.")

        self.maxima_color = Color(
            "Select maxima preview color",
            "Blue",
            doc="Maxima will be displayed in this color.",
        )

        self.maxima_size = Integer(
            "Select maxima preview size",
            value=1,
            minval=1,
            doc=
            "Size of the markers for each maxima in the preview. Positive pixels will be"
            "expanded by this radius."
            "You may want to increase this when working with large images.",
        )

        self.spacer = Divider(line=True)
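As a rough illustration of how the settings above could drive peak detection, here is a hedged sketch built on skimage.feature.peak_local_max; the helper name and its arguments are assumptions, not part of FindMaxima:

    def _find_peaks(self, pixel_data, mask_labels=None):
        # Hypothetical helper; `pixel_data` is the image and `mask_labels` is an
        # optional integer array derived from the mask/objects settings above.
        import numpy
        from skimage.feature import peak_local_max

        coords = peak_local_max(
            pixel_data,
            min_distance=self.min_distance.value,    # "Minimum distance between maxima"
            threshold_abs=self.min_intensity.value,  # "Specify the minimum intensity of a peak"
            labels=mask_labels,                      # restrict the search to a mask or objects
        )
        maxima = numpy.zeros(pixel_data.shape, dtype=bool)
        maxima[tuple(coords.T)] = True
        return maxima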
    def create_settings(self):

        self.input_object_name = LabelSubscriber(
            "Select objects to measure",
            "None",
            doc=
            """Select the objects whose radial entropy you want to measure.""")

        self.input_image_name = ImageSubscriber("Select an image to measure",
                                                "None",
                                                doc="""Select the
            grayscale image you want to measure the entropy of.""")

        self.bin_number = Integer(
            "Input number of bins",
            6,
            minval=3,
            maxval=60,
            doc=
            """Number of radial bins to divide your object into.  The minimum number
            of bins allowed is 3, the maximum number is 60.""")

        self.intensity_measurement = Choice(
            "Which intensity measurement should be used?",
            ['Mean', 'Median', 'Integrated'],
            value='Mean',
            doc="""
            Whether each wedge's mean, median, or integrated intensity
            should be used to calculate the entropy.""")
    def create_settings(self):
        self.object_name = LabelSubscriber(
            "Input objects",
            "None",
            doc="""Enter the name of the objects whose population context is
            to be measured.""")
        self.operation = Choice(
            "Operation",
            choices=(O_POPULATION_DENSITY, O_DISTANCE_TO_EDGE, O_BOTH),
            doc="""Select the measurements you wish to perform. The choices
            are:<br><ul>
            <li><i>%(O_POPULATION_DENSITY)s</i> - calculate the population
            density within a radius from each cell.</li>
            <li><i>%(O_DISTANCE_TO_EDGE)s</i> - calculate the distance of
            each cell from the edge of a binary mask.</li>
            <li><i>%(O_BOTH)s</i> - make both measurements.</li></ul>""" % globals())
        self.radius = Integer("Search radius",
                              50,
                              minval=1,
                              doc="""Count all objects within this radius.""")
        self.object_diameter = Integer(
            "Object diameter",
            20,
            minval=0,
            doc="""The average diameter of objects in the image. This number
            is used to adjust the area of the image to account for objects
            that would otherwise be excluded because they were touching
            the border.""")
        self.edge_image = ImageSubscriber(
            "Edge image",
            doc="""For measuring distance to an edge, this is the reference
            image. Cell distances will be computed to the nearest foreground /
            background edge in the reference image.""")
Example #6
    def create_settings(self):
        super(Predict, self).create_settings()

        self.executable = Pathname(
            "Executable",
            doc="ilastik command line executable name, or location if it is not on your path."
        )

        self.project_file = Pathname(
            "Project file",
            doc="Path to the project file (\*.ilp)."
        )

        self.project_type = Choice(
            "Select the project type",
            [
                "Pixel Classification",
                "Autocontext (2-stage)"
            ],
            "Pixel Classification",
            doc="""\
Select the project type which matches the project file specified by
*Project file*. CellProfiler supports two types of ilastik projects:

-  *Pixel Classification*: Classify the pixels of an image given user
   annotations. `Read more`_.

-  *Autocontext (2-stage)*: Perform pixel classification in multiple
   stages, sharing predictions between stages to improve results. `Read
   more <http://ilastik.org/documentation/autocontext/autocontext>`__.

.. _Read more: http://ilastik.org/documentation/pixelclassification/pixelclassification
"""
        )
    def add_step_parent(self, can_delete=True):
        group = SettingsGroup()

        group.append(
            "step_parent_name",
            Choice(
                "Parent name",
                ["None"],
                choices_fn=self.get_step_parents,
                doc="""\
*(Used only if calculating distances to another parent)*

Choose the name of the other parent. The **RelateObjects** module will
measure the distance from this parent to the child objects in the same
manner as it does to the primary parents. You can only choose the
parents or children of the parent object.""",
            ),
        )

        if can_delete:
            group.append(
                "remove",
                RemoveSettingButton("", "Remove this object",
                                    self.step_parent_names, group),
            )

        self.step_parent_names.append(group)
    def create_settings(self):
        self._ij = None

        self.host = Text(
            "ImageJ server",
            imagej.HOST
        )

        self.connect = DoSomething(
            "",
            "Connect",
            self._connect
        )

        self.divider = cellprofiler_core.setting.Divider(u"———OUTPUTS———")

        # These will get redefined after the module connects to the server.
        self.ij_module = Choice(
            "ImageJ module",
            choices=["-- NONE --"]
        )

        self.input_details = []
        self.input_settings = cellprofiler_core.setting.SettingsGroup()

        self.output_details = []
        self.output_settings = cellprofiler_core.setting.SettingsGroup()

        self.input_count = cellprofiler_core.setting.HiddenCount([], "")
Example #9
    def create_settings(self):
        self.object_name = LabelSubscriber(
            "Select the input objects",
            "None",
            doc=
            "Choose the name of the objects you want to convert to an image.",
        )

        self.image_name = ImageName(
            "Name the output image",
            "CellImage",
            doc="Enter the name of the resulting image.",
        )

        self.image_mode = Choice(
            "Select the color format",
            ["Color", "Binary (black & white)", "Grayscale", "uint16"],
            doc="""\
Select which colors the resulting image should use. You have the
following options:

-  *Color:* Allows you to choose a colormap that will produce jumbled
   colors for your objects.
-  *Binary (black & white):* All object pixels will be assigned 1 and
   all background pixels will be assigned 0, creating a binary image.
-  *Grayscale:* Assigns all background pixels to 0 and assigns each object's pixels with a number 
   specific to that object. Object numbers can range from 1 to 255 (the maximum value that you can put
   in an 8-bit integer); use **uint16** if you expect more than 255 objects.
   This creates an image where objects in the top left corner of the image are
   very dark and the colors progress to white toward the bottom right corner of the image.
   Use **SaveImages** to save the resulting image as a .npy file or .tiff file if you want
   to process the label matrix image using another program or in a separate CellProfiler pipeline.
-  *uint16:* Assigns all background pixels to 0 and assigns each object's pixels with a number 
   specific to that object. Object numbers can range from 1 to 65535 (the maximum value that you can put
   in a 16-bit integer). This creates an image where objects in the top left corner of the image are
   very dark and where the colors progress to white toward the bottom right corner of the image
   (though this can usually only be seen in a scientific image viewer since standard image viewers only
   handle 8-bit images). Use **SaveImages** to save the resulting image as a .npy file or
   **16-bit** (not 8-bit!) .tiff file if you want to process the label matrix image using another
   program or in a separate CellProfiler pipeline.

You can choose *Color* with a *Gray* colormap to produce jumbled gray
objects.
            """,
        )

        self.colormap = Colormap(
            "Select the colormap",
            doc="""\
*(Used only if "Color" output image selected)*

Choose the colormap to be used, which affects how the objects are
colored. You can look up your default colormap under *File >
Preferences*.
""",
        )
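To make the Grayscale and uint16 options above concrete, here is a hedged sketch of one plausible label-matrix conversion; the helper name is an assumption and the real module's scaling may differ:

    def _labels_to_grayscale(self, labels):
        # Hypothetical helper; `labels` is an integer label matrix with 0 as background.
        import numpy

        if self.image_mode.value == "uint16":
            # uint16: keep the raw object numbers, up to 65535.
            return labels.astype(numpy.uint16)
        # Grayscale: object numbers capped at 255 for an 8-bit image; background stays 0.
        return numpy.clip(labels, 0, 255).astype(numpy.uint8)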
    def create_settings(self):
        super(ResizeObjects, self).create_settings()

        self.method = Choice(
            "Method",
            ["Dimensions", "Factor", "Match Image"],
            doc="""\
The following options are available:

-  *Dimensions:* Enter the new height and width of the resized objects.
-  *Factor:* Enter a single value which specifies the scaling.
-  *Match Image:* Resize the objects to match the dimensions of a selected image.""",
            value="Factor",
        )

        self.factor = Float(
            "Factor",
            0.25,
            minval=0,
            doc="""\
*(Used only if resizing by "Factor")*

Numbers less than 1 will shrink the objects; numbers greater than 1 will
enlarge the objects.""",
        )

        self.width = Integer(
            "Width",
            100,
            minval=1,
            doc="""\
*(Used only if resizing by "Dimensions")*

Enter the desired width of the final objects, in pixels.""",
        )

        self.height = Integer(
            "Height",
            100,
            minval=1,
            doc="""\
*(Used only if resizing by "Dimensions")*

Enter the desired height of the final objects, in pixels.""",
        )

        self.specific_image = ImageSubscriber(
            "Select the image with the desired dimensions",
            "None",
            doc="""\
        *(Used only if resizing by specifying desired final dimensions using an image)*

        The input object set will be resized to the dimensions of the specified image.""",
        )
Example #11
    def create_settings(self):
        self.export_option = Choice(
            "Do you want to save cropped images or object masks?",
            [SAVE_PER_OBJECT, SAVE_MASK],
            doc="""\
Choose the way you want the per-object crops to be exported.

The choices are:

-  *{SAVE_PER_OBJECT}*: Save a per-object crop from the original image
   based on the object's bounding box.
-  *{SAVE_MASK}*: Export a per-object mask.""".format(
                SAVE_PER_OBJECT=SAVE_PER_OBJECT, SAVE_MASK=SAVE_MASK),
        )

        self.objects_name = LabelSubscriber(
            "Objects",
            doc="Select the objects you want to export as per-object crops.")

        self.image_name = ImageSubscriber("Image",
                                          doc="Select the image to crop")

        self.directory = Directory(
            "Directory",
            doc="Enter the directory where object crops are saved.",
            value=DEFAULT_OUTPUT_FOLDER_NAME,
        )

        self.file_format = Choice(
            "Saved file format",
            [O_PNG, O_TIFF_8, O_TIFF_16],
            value=O_TIFF_8,
            doc="""\
**{O_PNG}** files do not support 3D. **{O_TIFF_8}** files use zlib compression level 6."""
            .format(O_PNG=O_PNG, O_TIFF_8=O_TIFF_8, O_TIFF_16=O_TIFF_16),
        )
Example #12
    def create_settings(self):
        self.objects_x = LabelSubscriber(
            "Select initial object set",
            "None",
            doc="""Select an object set which you want to add objects to.""",
        )

        self.objects_y = LabelSubscriber(
            "Select object set to combine",
            "None",
            doc=
            """Select an object set which you want to add to the initial set.""",
        )

        self.merge_method = Choice(
            "Select how to handle overlapping objects",
            choices=["Merge", "Preserve", "Discard", "Segment"],
            doc="""\
When combining sets of objects, it is possible that both sets had an object in the
same location. Use this setting to choose how to handle objects which overlap with
each other.
        
- Selecting "Merge" will make overlapping objects combine into a single object, taking
  on the label of the object from the initial set. When an added object would overlap
  with multiple objects from the initial set, each pixel of the added object will be
  assigned to the closest object from the initial set. This is primarily useful when
  the same objects appear in both sets.
        
- Selecting "Preserve" will protect the initial object set. Any overlapping regions
  from the second set will be ignored in favour of the object from the initial set.
        
- Selecting "Discard" will only add objects which do not have any overlap with objects
  in the initial object set.
        
- Selecting "Segment" will combine both object sets and attempt to re-draw segmentation to
  separate objects which overlapped. Note: This is less reliable when more than
  two objects were overlapping. If two object sets genuinely occupy the same space
  it may be better to consider them separately.
         """,
        )

        self.output_object = LabelName(
            "Name the combined object set",
            "CombinedObjects",
            doc="""\
Enter the name for the combined object set. These objects will be available for use in
subsequent modules.""",
        )
Example #13
    def create_settings(self):
        self.site_count = Integer(
            "Number of image sites per well",
            1,
            minval=1,
            doc="""\
Enter the number of image sets (fields of view) corresponding to each well.""",
        )

        self.column_count = Integer(
            "Number of columns per plate",
            12,
            minval=1,
            doc="""\
Enter the number of columns per plate.""",
        )

        self.row_count = Integer(
            "Number of rows per plate",
            8,
            minval=1,
            doc="""\
Enter the number of rows per plate.""",
        )

        self.order = Choice(
            "Order of image data",
            [O_ROW, O_COLUMN],
            doc="""\
This setting specifies how the input data is ordered (assuming that
sites within a well are ordered consecutively):

-  *%(O_ROW)s:* The data appears by row and then by column. That is,
   all columns for a given row (e.g., A01, A02, A03…) appear
   consecutively, for each row in consecutive order.
-  *%(O_COLUMN)s:* The data appears by column and then by row. That is,
   all rows for a given column (e.g., A01, B01, C01…) appear
   consecutively, for each column in consecutive order.

For instance, the SBS Bioimage example (available `here`_) has files that are named:
Channel1-01-A01.tif, Channel1-02-A02.tif, …, Channel1-12-A12.tif, Channel1-13-B01.tif, …
You would use “%(O_ROW)s” to label these because the ordering is by row and then by column.

.. _here: http://cellprofiler.org/examples.html#SBS_Bioimage_CNT
""" % globals(),
        )
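The row/column ordering described above amounts to integer arithmetic on a zero-based image-set index. A hedged sketch follows; the helper name and well-name format are assumptions:

    def _well_for_image_index(self, index):
        # Hypothetical helper; sites within a well are assumed to be consecutive.
        well_index = index // self.site_count.value
        if self.order.value == O_ROW:
            # Row-major: A01, A02, ..., A12, B01, ...
            row = well_index // self.column_count.value
            column = well_index % self.column_count.value
        else:
            # Column-major: A01, B01, ..., H01, A02, ...
            row = well_index % self.row_count.value
            column = well_index // self.row_count.value
        return "%s%02d" % (chr(ord("A") + row), column + 1)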
    def create_settings(self):
        super(TopHatTransform, self).create_settings()

        self.operation_name = Choice(
            choices=["Black top-hat transform", "White top-hat transform"],
            text="Operation",
            value="Black top-hat transform",
            doc="""
            Select the top-hat transformation:
            <ul>
                <li><i>Black top-hat transform</i>: This operation returns the dark spots of the image that are smaller
                than the structuring element. Note that dark spots in the original image are bright spots after the
                black top hat.</li>
                <li><i>White top-hat transform</i>: This operation returns the bright spots of the image that are
                smaller than the structuring element.</li>
            </ul>
            """)

        self.structuring_element = cellprofiler_core.setting.StructuringElement(
        )
    def create_settings(self):
        super(ConstrainObjects, self).create_settings()

        self.reference_name = LabelSubscriber(
            text="Constraining Objects",
            doc="Objects to use as reference for the constraint")

        self.coersion_method = Choice(
            text="Handle protruding objects",
            choices=[METHOD_IGNORE, METHOD_REMOVE],
            value=METHOD_IGNORE,
            doc="""\
Assuming the objects are related, there may be some "child" objects
that protrude into the space of a "parent" object with a different label.
E.g. a nucleus from one cell may protrude into the membrane segmentation
of a different cell. This setting determines how to handle these cases.

**{METHOD_IGNORE}**: Ignore these protrusions, only constrain the real child
**{METHOD_REMOVE}**: Remove the portion of the child that protrudes into the wrong parent
""".format(**{
                "METHOD_IGNORE": METHOD_IGNORE,
                "METHOD_REMOVE": METHOD_REMOVE
            }))

        self.remove_orphans = cellprofiler_core.setting.Binary(
            text="Remove children without a corresponding parent",
            value=False,
            doc="""
Some objects may be "parent-less" orphans, e.g. nuclei segmentations that have no 
corresponding, surrounding membrane segmentations. This specifies how to handle these
objects.

**{NO}**: Ignore them
**{YES}**: Remove the entire object from the set
""".format(**{
                "YES": "Yes",
                "NO": "No"
            }))
Example #16
    def create_settings(self):
        super(FillObjects, self).create_settings()

        self.size = Float(
            text="Minimum hole size",
            value=64.0,
            doc="Holes smaller than this diameter will be filled.",
        )

        self.planewise = Binary(
            text="Planewise fill",
            value=False,
            doc="""\
Select "*{YES}*" to fill objects on a per-plane level. 
This will perform the hole filling on each plane of a 
volumetric image, rather than on the image as a whole. 
This may be helpful for removing seed artifacts that 
are the result of segmentation.
**Note**: Planewise operations will be considerably slower.
""".format(**{"YES": "Yes"}),
        )

        self.mode = Choice("Filling method", [MODE_HOLES, MODE_CHULL],
                           value=MODE_HOLES,
                           doc=f"""\
Choose the mode for hole filling.

In {MODE_HOLES} mode, the module will search for and fill holes entirely enclosed by
each object. The size of the holes to be filled can be controlled.

In {MODE_CHULL} mode, the module will apply the convex hull of each object to fill 
missing pixels. This can be useful when round objects have partial holes that are 
not entirely enclosed.

Note: Convex hulls for each object are applied sequentially and may overlap. This means 
that touching objects may not be perfectly convex if there was a region of overlap. 
""")
Example #17
    def create_settings(self):
        # the list of per image settings (name & scaling factor)
        self.images = []
        # create the first two images (the default number)
        self.add_image(False)
        self.add_image(False)

        # other settings
        self.operation = Choice(
            "Operation",
            [
                O_ADD,
                O_SUBTRACT,
                O_DIFFERENCE,
                O_MULTIPLY,
                O_DIVIDE,
                O_AVERAGE,
                O_MINIMUM,
                O_MAXIMUM,
                O_INVERT,
                O_LOG_TRANSFORM,
                O_LOG_TRANSFORM_LEGACY,
                O_AND,
                O_OR,
                O_NOT,
                O_EQUALS,
                O_NONE,
            ],
            doc="""\
Select the operation to perform. Note that if more than two images are
chosen, then operations will be performed sequentially from first to
last, e.g., for “Divide”, (Image1 / Image2) / Image3

-  *%(O_ADD)s:* Adds the first image to the second, and so on.
-  *%(O_SUBTRACT)s:* Subtracts the second image from the first.
-  *%(O_DIFFERENCE)s:* The absolute value of the difference between the
   first and second images.
-  *%(O_MULTIPLY)s:* Multiplies the first image by the second.
-  *%(O_DIVIDE)s:* Divides the first image by the second.
-  *%(O_AVERAGE)s:* Calculates the mean intensity of the images loaded
   in the module. This is equivalent to the Add option divided by the
   number of images loaded by this module. If you would like to average
   all of the images in an entire pipeline, i.e., across cycles, you
   should instead use the **CorrectIlluminationCalculate** module and
   choose the *All* (vs. *Each*) option.
-  *%(O_MINIMUM)s:* Returns the element-wise minimum value at each
   pixel location.
-  *%(O_MAXIMUM)s:* Returns the element-wise maximum value at each
   pixel location.
-  *%(O_INVERT)s:* Subtracts the image intensities from 1. This makes
   the darkest color the brightest and vice-versa. Note that if a
   mask has been applied to the image, the mask will also be inverted.
-  *%(O_LOG_TRANSFORM)s:* Log transforms each pixel’s intensity. The
   actual function is log\ :sub:`2`\ (image + 1), transforming values
   from 0 to 1 into values from 0 to 1.
-  *%(O_LOG_TRANSFORM_LEGACY)s:* Log\ :sub:`2` transform for backwards
   compatibility.
-  *%(O_NONE)s:* This option is useful if you simply want to select some
   of the later options in the module, such as adding, multiplying, or
   exponentiating your image by a constant.

The following are operations that produce binary images. In a binary
image, the foreground has a truth value of “true” (ones) and the background has
a truth value of “false” (zeros). The operations, *%(O_OR)s, %(O_AND)s and
%(O_NOT)s* will convert the input images to binary by changing all zero
values to background (false) and all other values to foreground (true).

-  *%(O_AND)s:* a pixel in the output image is in the foreground only
   if all corresponding pixels in the input images are also in the
   foreground.
-  *%(O_OR)s:* a pixel in the output image is in the foreground if a
   corresponding pixel in any of the input images is also in the
   foreground.
-  *%(O_NOT)s:* the foreground of the input image becomes the
   background of the output image and vice-versa.
-  *%(O_EQUALS)s:* a pixel in the output image is in the foreground if
   the corresponding pixels in the input images have the same value.

Note that *%(O_INVERT)s*, *%(O_LOG_TRANSFORM)s*,
*%(O_LOG_TRANSFORM_LEGACY)s* and *%(O_NONE)s* operate on only a
single image.
""" % globals(),
        )
        self.divider_top = Divider(line=False)

        self.exponent = Float(
            "Raise the power of the result by",
            1,
            doc="""\
Enter an exponent to raise the result to *after* the chosen operation.""",
        )

        self.after_factor = Float(
            "Multiply the result by",
            1,
            doc="""\
Enter a factor to multiply the result by *after* the chosen operation.""",
        )

        self.addend = Float(
            "Add to result",
            0,
            doc="""\
Enter a number to add to the result *after* the chosen operation.""",
        )

        self.truncate_low = Binary(
            "Set values less than 0 equal to 0?",
            True,
            doc="""\
Values outside the range 0 to 1 might not be handled well by other
modules. Select *Yes* to set negative values to 0.
""" % globals(),
        )

        self.truncate_high = Binary(
            "Set values greater than 1 equal to 1?",
            True,
            doc="""\
Values outside the range 0 to 1 might not be handled well by other
modules. Select *Yes* to set values greater than 1 to a maximum
value of 1.
""" % globals(),
        )

        self.replace_nan = Binary(
            "Replace invalid values with 0?",
            True,
            doc="""\
        Certain operations are mathematically invalid (divide by zero, 
        raise a negative number to the power of a fraction, etc.).
        This setting will set pixels with invalid values to zero.
        Disabling this setting will represent these pixels as "nan" 
        ("Not A Number"). "nan" pixels cannot be displayed properly and 
        may cause errors in other modules.
        """ % globals(),
        )

        self.ignore_mask = Binary(
            "Ignore the image masks?",
            False,
            doc="""\
Select *Yes* to set equal to zero all previously masked pixels and
operate on the masked images as if no mask had been applied. Otherwise,
the smallest image mask is applied after image math has been completed.
""" % globals(),
        )

        self.output_image_name = ImageName(
            "Name the output image",
            "ImageAfterMath",
            doc="""\
Enter a name for the resulting image.""",
        )

        self.add_button = DoSomething("", "Add another image", self.add_image)

        self.divider_bottom = Divider(line=False)
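The docs above spell out an ordering: per-image factors first, then the operation applied pairwise from first to last, then the post-operation adjustments and truncation. A hedged numpy sketch of that ordering (the relative order of the post-operation steps is an assumption):

    def _apply_image_math(self, pixel_data_list):
        # Hypothetical helper; `pixel_data_list` holds numpy arrays, one per image
        # group above, already multiplied by their per-image factors.
        import numpy

        result = pixel_data_list[0]
        for operand in pixel_data_list[1:]:
            if self.operation.value == O_ADD:
                result = result + operand
            elif self.operation.value == O_SUBTRACT:
                result = result - operand
            elif self.operation.value == O_DIVIDE:
                result = result / operand
            # ... remaining operations elided ...

        # Post-operation adjustments, in the order the settings are listed above.
        result = result ** self.exponent.value
        result = result * self.after_factor.value
        result = result + self.addend.value

        if self.replace_nan.value:
            result = numpy.nan_to_num(result, nan=0.0, posinf=0.0, neginf=0.0)
        if self.truncate_low.value:
            result[result < 0] = 0
        if self.truncate_high.value:
            result[result > 1] = 1
        return result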
Example #18
    def add_image(self, removable=True):
        # The text for these settings will be replaced in renumber_settings()
        group = SettingsGroup()
        group.removable = removable
        group.append(
            "image_or_measurement",
            Choice(
                "Image or measurement?",
                [IM_IMAGE, IM_MEASUREMENT],
                doc="""\
You can perform math operations using two images or you can use a
measurement for one of the operands. For instance, to divide the
intensity of one image by another, choose *%(IM_IMAGE)s* for both and
pick the respective images. To divide the intensity of an image by its
median intensity, use **MeasureImageIntensity** prior to this module to
calculate the median intensity, then select *%(IM_MEASUREMENT)s* and
use the median intensity measurement as the denominator.
""" % globals(),
            ),
        )

        group.append(
            "image_name",
            ImageSubscriber(
                "Select the image",
                "None",
                doc="""\
Select the image that you want to use for this operation.""",
            ),
        )

        group.append(
            "measurement",
            Measurement(
                "Measurement",
                lambda: "Image",
                "",
                doc="""\
Select a measurement made on the image. The value of the
measurement is used for the operand for all of the pixels of the
other operand's image.""",
            ),
        )

        group.append(
            "factor",
            Float(
                "Multiply the image by",
                1,
                doc="""\
Enter the number that you would like to multiply the above image by. This multiplication
is applied before other operations.""",
            ),
        )

        if removable:
            group.append(
                "remover",
                RemoveSettingButton("", "Remove this image", self.images,
                                    group),
            )

        group.append("divider", Divider())
        self.images.append(group)
Example #19
    def create_settings(self):
        """Create the settings here and set the module name (initialization)

        """
        self.source_choice = Choice(
            "Use objects or an image as a mask?",
            [IO_OBJECTS, IO_IMAGE],
            doc="""\
You can mask an image in two ways:

-  *%(IO_OBJECTS)s*: Using objects created by another module (for
   instance **IdentifyPrimaryObjects**). The module will mask out all
   parts of the image that are not within one of the objects (unless you
   invert the mask).
-  *%(IO_IMAGE)s*: Using a binary image as the mask, where black
   portions of the image (false or zero-value pixels) will be masked
   out. If the image is not binary, the module will use all pixels whose
   intensity is greater than 0.5 as the mask’s foreground (white area).
   You can use **Threshold** instead to create a binary image with
   finer control over the intensity choice.
   """ % globals(),
        )

        self.object_name = LabelSubscriber(
            "Select object for mask",
            "None",
            doc="""\
*(Used only if mask is to be made from objects)*

Select the objects you would like to use to mask the input image.
""",
        )

        self.masking_image_name = ImageSubscriber(
            "Select image for mask",
            "None",
            doc="""\
*(Used only if mask is to be made from an image)*

Select the image that you would like to use to mask the input image.
""",
        )

        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="Select the image that you want to mask.",
        )

        self.masked_image_name = ImageName(
            "Name the output image",
            "MaskBlue",
            doc="Enter the name for the output masked image.",
        )

        self.invert_mask = Binary(
            "Invert the mask?",
            False,
            doc="""\
This option reverses the foreground/background relationship of the mask.

-  Select "*No*" to produce the mask from the foreground (white
   portion) of the masking image or the area within the masking objects.
-  Select "*Yes*" to instead produce the mask from the *background*
   (black portions) of the masking image or the area *outside* the
   masking objects.
       """ % globals(),
        )
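A hedged sketch of the masking rules described above; the helper and its arguments are assumptions, not the module's code:

    def _build_mask(self, masking_pixels=None, object_labels=None):
        # Hypothetical helper combining the doc's rules for the two mask sources.
        if self.source_choice.value == IO_OBJECTS:
            # Objects as mask: any labeled pixel counts as foreground.
            mask = object_labels > 0
        else:
            # Image as mask: pixels above 0.5 count as foreground, per the doc.
            mask = masking_pixels > 0.5
        if self.invert_mask.value:
            mask = ~mask
        return mask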
    def create_settings(self):
        #
        # The ImageSubscriber "subscribes" to all ImageNameProviders in
        # prior modules. Modules before yours will put images into CellProfiler.
        # The ImageSubscriber gives your user a list of these images
        # which can then be used as inputs in your module.
        #
        self.input_image_name = ImageSubscriber(
            # The text to the left of the edit box
            "Input image name",
            # HTML help that gets displayed when the user presses the
            # help button to the right of the edit box
            doc="""This is the image that the module operates on. You can
            choose any image that is made available by a prior module.
            <br>
            <b>ImageTemplate</b> will do something to this image.
            """)
        #
        # The text.ImageName makes the image available to subsequent
        # modules.
        #
        self.output_image_name = ImageName(
            "Output image name",
            # The second parameter holds a suggested name for the image.
            "OutputImage",
            doc="""This is the image resulting from the operation.""")
        #
        # Here's a choice box - the user gets a drop-down list of what
        # can be done.
        #
        self.transform_choice = Choice(
            "Transform choice",
            # The choice takes a list of possibilities. The first one
            # is the default - the one the user will typically choose.
            [
                M_FOURIER, M_SIMONCELLI_P, M_SIMONCELLI_R, M_TEST_FOURIER,
                M_TEST_SIMONCELLI_P, M_TEST_SIMONCELLI_R, M_HAAR_S, M_HAAR_T,
                M_TEST_HAAR, M_CHEBYSHEV_T
            ],
            #
            # Here, in the documentation, we can do a little trick so that
            # we reuse the actual text that's displayed for each choice.
            #
            # %(M_FOURIER)s would get changed into the text of M_FOURIER,
            # etc. Python will look in globals() for the "M_" names
            # and paste them in where it sees %(M_...)s
            #
            # The <ul> and <li> tags make a neat bullet-point list in the docs
            #
            doc='''There are several transforms available:
             <ul><li><i>Fourier Transform:</i> Blabla. </li>
             <li><i>Wavelet Transform:</i> Blabla. </li>
             <li><i>Chebyshev Transform:</i> Blabla. </li></ul>''' % globals())
        #
        # We use an integer setting so that the user can give us a number
        # for the scale. The control will turn red if the user types in
        # an invalid scale.
        #
        self.scale = Integer(
            "Scale",
            # The default value is 3 - a short-range scale
            3,
            # We don't let the user type in really small values
            minval=1,
            # or large values
            maxval=100,
            doc="""This is a scaling factor that supplies the sigma for
            a gaussian that's used to smooth the image. The gradient is
            calculated on the smoothed image, so large scales will give
            you long-range gradients and small scales will give you
            short-range gradients""")

        self.M = Integer(
            "Order",
            # The default value is 0 - use the order matching the image dimensions
            0,
            # We don't let the user type in really small values
            minval=0,
            # or large values
            maxval=50,
            doc=
            """This is the order of the Chebyshev Transform. A value of 0 will use the order matching the image dimensions."""
        )
    def add_image(self, can_delete=True):
        """Add an image and its settings to the list of images"""
        image_name = ImageSubscriber(
            "Select the input image", "None", doc="Select the image to be corrected."
        )

        corrected_image_name = ImageName(
            "Name the output image",
            "CorrBlue",
            doc="Enter a name for the corrected image.",
        )

        illum_correct_function_image_name = ImageSubscriber(
            "Select the illumination function",
            "None",
            doc="""\
Select the illumination correction function image that will be used to
carry out the correction. This image is usually produced by another
module or loaded as a .mat or .npy format image using the **Images** module
or a **LoadData** module.

Note that loading .mat format images is deprecated and will be removed in
a future version of CellProfiler. You can export .mat format images as
.npy format images using **SaveImages** to ensure future compatibility.
""",
        )

        divide_or_subtract = Choice(
            "Select how the illumination function is applied",
            [DOS_DIVIDE, DOS_SUBTRACT],
            doc="""\
This choice depends on how the illumination function was calculated and
on your physical model of the way illumination variation affects the
background of images relative to the objects in images; it is also
somewhat empirical.

-  *%(DOS_SUBTRACT)s:* Use this option if the background signal is
   significant relative to the real signal coming from the cells. If you
   created the illumination correction function using
   *Background*, then you will want to choose
   *%(DOS_SUBTRACT)s* here.
-  *%(DOS_DIVIDE)s:* Choose this option if the signal to background
   ratio is high (the cells are stained very strongly). If you created
   the illumination correction function using *Regular*, then
   you will want to choose *%(DOS_DIVIDE)s* here.
"""
            % globals(),
        )

        image_settings = SettingsGroup()
        image_settings.append("image_name", image_name)
        image_settings.append("corrected_image_name", corrected_image_name)
        image_settings.append(
            "illum_correct_function_image_name", illum_correct_function_image_name
        )
        image_settings.append("divide_or_subtract", divide_or_subtract)
        image_settings.append("rescale_option", RE_NONE)

        if can_delete:
            image_settings.append(
                "remover",
                RemoveSettingButton(
                    "", "Remove this image", self.images, image_settings
                ),
            )
        image_settings.append("divider", Divider())
        self.images.append(image_settings)
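The divide/subtract choice above corresponds to a simple elementwise operation. A hedged sketch follows; the zero guard and the clipping are assumptions of the sketch, not necessarily the module's behavior:

    def _apply_correction(self, image_pixels, illum_pixels, divide_or_subtract):
        # Hypothetical helper; both image arguments are numpy arrays of the same shape.
        import numpy

        if divide_or_subtract == DOS_DIVIDE:
            # Divide, guarding against zeros in the illumination function.
            return image_pixels / numpy.maximum(illum_pixels, numpy.finfo(float).eps)
        # Subtract, clipping negative results to zero.
        return numpy.clip(image_pixels - illum_pixels, 0, None)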
Example #22
    def create_settings(self):
        """Create your settings by subclassing this function

        create_settings is called at the end of initialization.

        You should create the setting variables for your module here:
            # Ask the user for the input image
            self.image_name = ImageSubscriber(...)
            # Ask the user for the name of the output image
            self.output_image = ImageName(...)
            # Ask the user for a parameter
            self.smoothing_size = Float(...)
        """
        self.object_name = LabelSubscriber(
            "Select the objects to be edited",
            "None",
            doc="""\
Choose a set of previously identified objects
for editing, such as those produced by one of the
**Identify** modules (e.g., "*IdentifyPrimaryObjects*", "*IdentifySecondaryObjects*" etc.).""",
        )

        self.filtered_objects = LabelName(
            "Name the edited objects",
            "EditedObjects",
            doc="""\
Enter the name for the objects that remain
after editing. These objects will be available for use by
subsequent modules.""",
        )

        self.allow_overlap = Binary(
            "Allow overlapping objects?",
            False,
            doc="""\
**EditObjectsManually** can allow you to edit an object so that it
overlaps another or it can prevent you from overlapping one object with
another. Objects such as worms or the neurites of neurons may cross each
other and might need to be edited with overlapping allowed, whereas a
monolayer of cells might be best edited with overlapping off.
Select "*Yes*" to allow overlaps or select "*No*" to prevent them.
""" % globals(),
        )

        self.renumber_choice = Choice(
            "Numbering of the edited objects",
            [R_RENUMBER, R_RETAIN],
            doc="""\
Choose how to number the objects that remain after editing, which
controls how edited objects are associated with their predecessors:

-  *%(R_RENUMBER)s:* The module will number the objects that remain
   using consecutive numbers. This is a good choice if you do not plan
   to use measurements from the original objects and you only want to
   use the edited objects in downstream modules; the objects that remain
   after editing will not have gaps in numbering where removed objects
   are missing.
-  *%(R_RETAIN)s:* This option will retain each object’s original
   number so that the edited object’s number matches its original
   number. This allows any measurements you make from the edited objects
   to be directly aligned with measurements you might have made of the
   original, unedited objects (or objects directly associated with
   them).
""" % globals(),
        )

        self.wants_image_display = Binary(
            "Display a guiding image?",
            True,
            doc="""\
Select "*Yes*" to display an image and outlines of the objects.

Select "*No*" if you do not want a guide image while editing.
""" % globals(),
        )

        self.image_name = ImageSubscriber(
            "Select the guiding image",
            "None",
            doc="""\
*(Used only if a guiding image is desired)*

This is the image that will appear when editing objects. Choose an image
supplied by a previous module.
""",
        )
    def create_settings(self):
        super(RelateObjects, self).create_settings()

        self.x_name.text = "Parent objects"

        self.x_name.doc = """\
Parent objects are defined as those objects which encompass the child object.
For example, when relating speckles to the nuclei that contain them,
the nuclei are the parents.
        """

        self.y_name = LabelSubscriber(
            "Child objects",
            doc="""\
Child objects are defined as those objects contained within the parent object. For example, when relating
speckles to the nuclei that contain them, the speckles are the children.
            """,
        )

        self.find_parent_child_distances = Choice(
            "Calculate child-parent distances?",
            D_ALL,
            doc="""\
Choose the method to calculate distances of each child to its parent.
For example, these measurements can tell you whether nuclear speckles
are located more closely to the center of the nucleus or to the nuclear
periphery.

-  *{D_NONE}:* Do not calculate any distances. This saves computation time.
-  *{D_MINIMUM}:* The distance from the centroid of the child object to
   the closest perimeter point on the parent object.
-  *{D_CENTROID}:* The distance from the centroid of the child object
   to the centroid of the parent.
-  *{D_BOTH}:* Calculate both the *{D_MINIMUM}* and *{D_CENTROID}*
   distances.""".format(
                **{
                    "D_NONE": D_NONE,
                    "D_MINIMUM": D_MINIMUM,
                    "D_CENTROID": D_CENTROID,
                    "D_BOTH": D_BOTH,
                }),
        )

        self.wants_step_parent_distances = Binary(
            "Calculate distances to other parents?",
            False,
            doc="""\
*(Used only if calculating distances)*

Select "*{YES}*" to calculate the distances of the child objects to some
other objects. These objects must be either parents or children of your
parent object in order for this module to determine the distances. For
instance, you might find “Nuclei” using **IdentifyPrimaryObjects**, find
“Cells” using **IdentifySecondaryObjects** and find “Cytoplasm” using
**IdentifyTertiaryObjects**. You can use **Relate** to relate speckles
to cells and then measure distances to nuclei and cytoplasm. You could
not use **RelateObjects** to relate speckles to cytoplasm and then
measure distances to nuclei, because nuclei are neither a direct parent
nor child of cytoplasm.""".format(**{"YES": "Yes"}),
        )

        self.step_parent_names = []

        self.add_step_parent(can_delete=False)

        self.add_step_parent_button = DoSomething("", "Add another parent",
                                                  self.add_step_parent)

        self.wants_per_parent_means = Binary(
            "Calculate per-parent means for all child measurements?",
            False,
            doc="""\
Select "*{YES}*" to calculate the per-parent mean values of every upstream
measurement made with the children objects and store them as a
measurement for the parent; the nomenclature of this new measurement is
“Mean_<child>_<category>_<feature>”. This module
must be placed *after* all **Measure** modules that make measurements
of the children objects.""".format(**{"YES": "Yes"}),
        )

        self.wants_child_objects_saved = Binary(
            "Do you want to save the children with parents as a new object set?",
            False,
            doc="""\
Select "*{YES}*" to save the children objects that do have parents as new
object set. Objects with no parents will be discarded""".format(
                **{"YES": "Yes"}),
        )

        self.output_child_objects_name = LabelName(
            "Name the output object",
            "RelateObjects",
            doc="""\
Enter the name for the objects produced by this module.""",
        )
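For the centroid-distance option described above, a hedged sketch built on scipy.ndimage.center_of_mass; the helper and its inputs are assumptions, and it assumes every child has a parent:

    def _centroid_distances(self, child_labels, parent_labels, parents_of):
        # Hypothetical helper; `parents_of[i]` is the parent label of child label i + 1.
        import numpy
        from scipy.ndimage import center_of_mass

        child_centers = numpy.array(center_of_mass(
            child_labels > 0, child_labels, range(1, int(child_labels.max()) + 1)))
        parent_centers = numpy.array(center_of_mass(
            parent_labels > 0, parent_labels, range(1, int(parent_labels.max()) + 1)))
        # Distance from each child centroid to its assigned parent's centroid.
        return numpy.linalg.norm(
            child_centers - parent_centers[numpy.asarray(parents_of) - 1], axis=1)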
Example #24
    def create_settings(self):
        self.scheme_choice = Choice(
            "Select a color scheme",
            [SCHEME_RGB, SCHEME_CMYK, SCHEME_STACK, SCHEME_COMPOSITE],
            doc="""\
This module can use one of four color schemes to combine images:

-  *%(SCHEME_RGB)s*: Each input image determines the intensity of one
   of the color channels: red, green, and blue.
-  *%(SCHEME_CMYK)s*: Three of the input images are combined to
   determine the colors (cyan, magenta, and yellow) and a fourth is used
   only for brightness. The cyan image adds equally to the green and
   blue intensities. The magenta image adds equally to the red and blue
   intensities. The yellow image adds equally to the red and green
   intensities.
-  *%(SCHEME_STACK)s*: The channels are stacked in the order listed,
   from top to bottom. An arbitrary number of channels is allowed.

   For example, you could create a 5-channel image by providing
   5 grayscale images. The first grayscale image you provide will fill
   the first channel, the second grayscale image you provide will fill
   the second channel, and so on.
-  *%(SCHEME_COMPOSITE)s*: A color is assigned to each grayscale image.
   Each grayscale image is converted to color by multiplying the
   intensity by the color and the resulting color images are added
   together. An arbitrary number of channels can be composited into a
   single color image.
"""
            % globals(),
        )

        self.wants_rescale = Binary(
            "Rescale intensity",
            True,
            doc="""\
Choose whether to rescale each channel individually to 
the range of 0-1. This prevents clipping of channels with intensity 
above 1 and can help to balance the brightness of the different channels. 
This option also ensures that channels occupy the full intensity range 
available, which is useful for displaying images in other software.

This rescaling is applied before any multiplication factors set in this 
module's options. Using a multiplication factor >1 would therefore result 
in clipping."""
        )

        # # # # # # # # # # # # # # # #
        #
        # RGB settings
        #
        # # # # # # # # # # # # # # # #
        self.red_image_name = ImageSubscriber(
            "Select the image to be colored red",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in red.
"""
            % globals(),
        )

        self.green_image_name = ImageSubscriber(
            "Select the image to be colored green",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in green.
"""
            % globals(),
        )

        self.blue_image_name = ImageSubscriber(
            "Select the image to be colored blue",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in blue.
"""
            % globals(),
        )

        self.rgb_image_name = ImageName(
            "Name the output image",
            "ColorImage",
            doc="""Enter a name for the resulting image.""",
        )

        self.red_adjustment_factor = Float(
            "Relative weight for the red image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the red image. If all relative weights are
equal, all three colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.green_adjustment_factor = Float(
            "Relative weight for the green image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the green image. If all relative weights
are equal, all three colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.blue_adjustment_factor = Float(
            "Relative weight for the blue image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the blue image. If all relative weights
are equal, all three colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )
        # # # # # # # # # # # # # #
        #
        # CMYK settings
        #
        # # # # # # # # # # # # # #
        self.cyan_image_name = ImageSubscriber(
            "Select the image to be colored cyan",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in cyan.
"""
            % globals(),
        )

        self.magenta_image_name = ImageSubscriber(
            "Select the image to be colored magenta",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in magenta.
"""
            % globals(),
        )

        self.yellow_image_name = ImageSubscriber(
            "Select the image to be colored yellow",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in yellow.
"""
            % globals(),
        )

        self.gray_image_name = ImageSubscriber(
            "Select the image that determines brightness",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image that will determine each pixel's brightness.
"""
            % globals(),
        )

        self.cyan_adjustment_factor = Float(
            "Relative weight for the cyan image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the cyan image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.magenta_adjustment_factor = Float(
            "Relative weight for the magenta image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the magenta image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.yellow_adjustment_factor = Float(
            "Relative weight for the yellow image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the yellow image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.gray_adjustment_factor = Float(
            "Relative weight for the brightness image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the brightness image. If all relative
weights are equal, all colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        # # # # # # # # # # # # # #
        #
        # Stack settings
        #
        # # # # # # # # # # # # # #

        self.stack_channels = []
        self.stack_channel_count = HiddenCount(self.stack_channels)
        self.add_stack_channel_cb(can_remove=False)
        self.add_stack_channel = DoSomething(
            "Add another channel",
            "Add another channel",
            self.add_stack_channel_cb,
            doc="""\
    Press this button to add another image to the stack.
    """,
        )
Example #25
0
    def create_settings(self):
        self.images_list = ImageListSubscriber(
            "Select images to measure",
            [],
            doc=
            """Select the grayscale images whose intensity you want to measure.""",
        )

        self.objects_list = LabelListSubscriber(
            "Select objects to measure",
            [],
            doc="""\
        Select the objects whose texture you want to measure. If you only want
        to measure the texture for the image overall, you can remove all objects
        using the “Remove this object” button.

        Objects specified here will have their texture measured against *all*
        images specified above, which may lead to image-object combinations that
        are unnecessary. If you do not want this behavior, use multiple
        **MeasureTexture** modules to specify the particular image-object
        measures that you want.
        """,
        )

        self.gray_levels = Integer(
            "Enter how many gray levels to measure the texture at",
            256,
            2,
            256,
            doc="""\
        Enter the number of gray levels (i.e., the number of possible intensity
        values) at which to measure texture. Measuring at more levels gives you
        *potentially* more detailed information about your image, but at the cost
        of somewhat slower processing.

        Before processing, your image will be rescaled from its current pixel values
        to the range 0 to [gray levels - 1]. The texture features will then be calculated.

        In all CellProfiler 2 versions, this value was fixed at 8; in all
        CellProfiler 3 versions it was fixed at 256. The minimum number of levels is
        2; the maximum is 256.
        """,
        )
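
# A rough sketch of the rescaling described in the setting above: the image is
# mapped from its current range onto 0..(gray levels - 1) before the texture
# features are computed. Illustrative only; not the module's exact code.
import numpy as np

def rescale_to_gray_levels(image, gray_levels=256):
    span = max(float(image.max() - image.min()), np.finfo(float).eps)
    scaled = (image - image.min()) / span
    return (scaled * (gray_levels - 1)).round().astype(np.uint8)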

        self.scale_groups = []

        self.scale_count = HiddenCount(self.scale_groups)

        self.image_divider = Divider()

        self.object_divider = Divider()

        self.add_scale(removable=False)

        self.add_scales = DoSomething(
            callback=self.add_scale,
            label="Add another scale",
            text="",
            doc="""\
            Add an additional texture scale to measure. Useful when you
            want to measure texture features of different sizes.
            """,
        )

        self.images_or_objects = Choice(
            "Measure whole images or objects?",
            [IO_IMAGES, IO_OBJECTS, IO_BOTH],
            value=IO_BOTH,
            doc="""\
This setting determines whether the module computes image-wide
measurements, per-object measurements or both.

-  *{IO_IMAGES}:* Select if you only want to measure the texture
   across entire images.
-  *{IO_OBJECTS}:* Select if you want to measure the texture
   on a per-object basis only.
-  *{IO_BOTH}:* Select to make both image and object measurements.
""".format(
                **{
                    "IO_IMAGES": IO_IMAGES,
                    "IO_OBJECTS": IO_OBJECTS,
                    "IO_BOTH": IO_BOTH
                }),
        )
Example #26
0
    def create_settings(self):
        self.blank_image = Binary(
            "Display outlines on a blank image?",
            False,
            doc="""\
Select "*{YES}*" to produce an image of the outlines on a black background.

Select "*{NO}*" to overlay the outlines on an image you choose.
""".format(
                **{"YES": "Yes", "NO": "No"}
            ),
        )

        self.image_name = ImageSubscriber(
            "Select image on which to display outlines",
            "None",
            doc="""\
*(Used only when a blank image has not been selected)*

Choose the image to serve as the background for the outlines. You can
choose from images that were loaded or created by modules previous to
this one.
""",
        )

        self.line_mode = Choice(
            "How to outline",
            ["Inner", "Outer", "Thick"],
            value="Inner",
            doc="""\
Specify how to mark the boundaries around an object:

-  *Inner:* outline the pixels just inside of objects, leaving
   background pixels untouched.
-  *Outer:* outline pixels in the background around object boundaries.
   When two objects touch, their boundary is also marked.
-  *Thick:* any pixel not completely surrounded by pixels of the same
   label is marked as a boundary. This results in boundaries that are 2
   pixels thick.
""",
        )
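
# The three outline modes correspond closely to scikit-image's boundary modes;
# a small illustration (not necessarily what this module uses internally).
import numpy as np
from skimage.segmentation import find_boundaries

labels = np.zeros((8, 8), dtype=int)
labels[2:6, 2:6] = 1
inner = find_boundaries(labels, mode="inner")  # pixels just inside objects
outer = find_boundaries(labels, mode="outer")  # background pixels around objects
thick = find_boundaries(labels, mode="thick")  # boundaries two pixels wide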

        self.output_image_name = ImageName(
            "Name the output image",
            "OrigOverlay",
            doc="""\
Enter the name of the output image with the outlines overlaid. This
image can be selected in later modules (for instance, **SaveImages**).
""",
        )

        self.wants_color = Choice(
            "Outline display mode",
            [WANTS_COLOR, WANTS_GRAYSCALE],
            doc="""\
Specify how to display the outline contours around your objects. Color
outlines produce a clearer display for images where the cell borders
have a high intensity, but take up more space in memory. Grayscale
outlines are displayed with either the highest possible intensity or the
same intensity as the brightest pixel in the image.
""",
        )

        self.spacer = Divider(line=False)

        self.max_type = Choice(
            "Select method to determine brightness of outlines",
            [MAX_IMAGE, MAX_POSSIBLE],
            doc="""\
*(Used only when outline display mode is grayscale)*

The following options are possible for setting the intensity
(brightness) of the outlines:

-  *{MAX_IMAGE}:* Set the brightness to the same as the brightest
   point in the image.
-  *{MAX_POSSIBLE}:* Set to the maximum possible value for this image
   format.

If your image is quite dim, then putting bright white lines onto it may
not be useful. It may be preferable to make the outlines equal to the
maximal brightness already occurring in the image.
""".format(
                **{"MAX_IMAGE": MAX_IMAGE, "MAX_POSSIBLE": MAX_POSSIBLE}
            ),
        )

        self.outlines = []

        self.add_outline(can_remove=False)

        self.add_outline_button = DoSomething(
            "", "Add another outline", self.add_outline
        )
    def create_settings(self):
        super(RunStarDist, self).create_settings()

        self.model = Choice(
            text="Model",
            choices=MODEL_OPTIONS,
            value=GREY_1,
            doc="""\
StarDist comes with models for detecting nuclei. Alternatively, you can supply a custom model
trained outside of CellProfiler in Python. Custom models can be useful when working with unusual cell types.

The inbuilt fluorescent and DSB models expect grayscale images. The H&E model expects a color (brightfield)
image as input. Custom models require images of the same type they were trained on. Note that the
models supplied with StarDist do not support 3D images, but it is possible to train and use your own.
""",
        )

        self.tile_image = Binary(
            text="Tile input image?",
            value=False,
            doc="""\
If enabled, the input image will be broken down into overlapping tiles.
This can help to conserve memory when working with large images.

The image is split into the specified number of horizontal and vertical tiles;
the total number of tiles is the product of the two.""",
        )

        self.n_tiles_x = Integer(text="Horizontal tiles",
                                 value=1,
                                 minval=1,
                                 doc="""\
Specify the number of tiles to break the image down into along the x-axis (horizontal)."""
                                 )

        self.n_tiles_y = Integer(text="Vertical tiles",
                                 value=1,
                                 minval=1,
                                 doc="""\
Specify the number of tiles to break the image down into along the y-axis (vertical)."""
                                 )
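
# Sketch of the tile-count arithmetic described above: the image is divided
# into n_tiles_x * n_tiles_y pieces. (StarDist's own prediction call accepts an
# `n_tiles` argument and handles the overlap internally; this is only an
# illustration of the grid split.)
import numpy as np

def tile_grid(image, n_tiles_x=2, n_tiles_y=2):
    rows = np.array_split(np.arange(image.shape[0]), n_tiles_y)
    cols = np.array_split(np.arange(image.shape[1]), n_tiles_x)
    tiles = [image[r[0]:r[-1] + 1, c[0]:c[-1] + 1] for r in rows for c in cols]
    assert len(tiles) == n_tiles_x * n_tiles_y
    return tiles

tiles = tile_grid(np.zeros((100, 120)), n_tiles_x=3, n_tiles_y=2)  # 6 tiles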

        self.save_probabilities = Binary(
            text="Save probability image?",
            value=False,
            doc="""
If enabled, the probability scores from the model will be recorded as a new image.
Probabilities range from 0 to 1, with 1 representing absolute certainty that a pixel lies within a cell.
You may want to apply a custom threshold to this image to generate objects manually.""",
        )

        self.probabilities_name = ImageName(
            "Name the probability image",
            "Probabilities",
            doc=
            "Enter the name you want to call the probability image produced by this module.",
        )
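
# Sketch of how a saved probability image could later be thresholded manually
# to generate objects, as suggested above (names here are placeholders).
import numpy as np
from scipy import ndimage

def objects_from_probabilities(probabilities, threshold=0.5):
    labels, count = ndimage.label(probabilities > threshold)
    return labels, count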

        self.model_directory = Directory("Model folder",
                                         doc=f"""\
*(Used only when using a custom pre-trained model)*

Select the folder containing your StarDist model. This should have the config, threshold and weights files 
exported after training.""")

        self.gpu_test = DoSomething(
            "",
            "Test GPU",
            self.do_check_gpu,
            doc=f"""\
Press this button to check whether a GPU is correctly configured.

If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the 
required dependencies are not installed. 
Make sure you followed the setup instructions here: https://www.tensorflow.org/install/gpu

If you don't have a GPU or it's not configured, StarDist will instead run on the CPU. 
This will be slower but should work on any system.
""",
        )
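
# A minimal check along the lines of what the "Test GPU" button describes,
# assuming a TensorFlow backend (the module's own check may differ).
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")
print("GPU detected" if gpus else "No GPU detected; StarDist will run on the CPU")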
    def create_settings(self):
        super(RunCellpose, self).create_settings()

        self.expected_diameter = Integer(
            text="Expected object diameter",
            value=15,
            minval=0,
            doc="""\
The average diameter of the objects to be detected. Setting this to 0 will attempt to automatically detect object size.
Note that automatic diameter mode does not work when running on 3D images.

Cellpose models come with a pre-defined object diameter. Your image will be resized during detection to attempt to
match the diameter expected by the model. The default models have an expected diameter of ~16 pixels; if you are trying
to detect much smaller objects, it may be more efficient to resize the image first using the **Resize** module.
""",
        )
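
# The resize arithmetic implied above, as a rough worked example: the image is
# rescaled by (model diameter / expected diameter). Values here are assumptions
# taken from the docstring, not authoritative model constants.
model_diameter = 16.0      # approximate diameter the default models expect
expected_diameter = 60.0   # e.g. large cells in your images
rescale_factor = model_diameter / expected_diameter  # ~0.27, image shrunk ~3.7x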

        self.mode = Choice(
            text="Detection mode",
            choices=[MODE_NUCLEI, MODE_CELLS, MODE_CUSTOM],
            value=MODE_NUCLEI,
            doc="""\
Cellpose comes with models for detecting nuclei or whole cells. Alternatively, you can supply a custom-trained model
generated using the command line or the Cellpose GUI. Custom models can be useful when working with unusual cell types.
""",
        )

        self.use_gpu = Binary(text="Use GPU",
                              value=False,
                              doc=f"""\
If enabled, Cellpose will attempt to run detection on your system's graphics card (GPU).
Note that you will need a CUDA-compatible GPU and a correctly configured PyTorch installation; see this link for details:
{CUDA_LINK}

If disabled or incorrectly configured, Cellpose will run on your CPU instead. This is much slower but more compatible
with different hardware setups.

Note that, particularly when in 3D mode, lack of GPU memory can become a limitation. If a model crashes, you may need to
restart CellProfiler to release GPU memory. Resizing large images before running them through the model can free up
GPU memory.

""")

        self.use_averaging = Binary(text="Use averaging",
                                    value=True,
                                    doc="""\
If enabled, Cellpose will run its four inbuilt models and take a consensus to determine the results. If disabled, only a
single model will be called to produce results. Disabling averaging is faster to run but less accurate."""
                                    )

        self.supply_nuclei = Binary(text="Supply nuclei image as well?",
                                    value=False,
                                    doc="""
When detecting whole cells, you can provide a second image featuring a nuclear stain to assist 
the model with segmentation. This can help to split touching cells.""")

        self.nuclei_image = ImageSubscriber(
            "Select the nuclei image",
            doc="Select the image you want to use as the nuclear stain.")

        self.save_probabilities = Binary(
            text="Save probability image?",
            value=False,
            doc="""
If enabled, the probability scores from the model will be recorded as a new image.
Pixels with a probability greater than 0 are considered part of a cell.
You may want to apply a higher threshold to this image to generate objects manually.""",
        )

        self.probabilities_name = ImageName(
            "Name the probability image",
            "Probabilities",
            doc=
            "Enter the name you want to call the probability image produced by this module.",
        )

        self.model_directory = Directory(
            "Location of the pre-trained model file",
            doc=f"""\
*(Used only when using a custom pre-trained model)*

Select the location of the pre-trained CellPose model file that will be used for detection."""
        )

        def get_directory_fn():
            """Get the directory for the rules file name"""
            return self.model_directory.get_absolute_path()

        def set_directory_fn(path):
            dir_choice, custom_path = self.model_directory.get_parts_from_path(
                path)

            self.model_directory.join_parts(dir_choice, custom_path)

        self.model_file_name = Filename("Pre-trained model file name",
                                        "cyto_0",
                                        get_directory_fn=get_directory_fn,
                                        set_directory_fn=set_directory_fn,
                                        doc=f"""\
*(Used only when using a custom pre-trained model)*

This file can be generated by training a custom model within the Cellpose GUI or command-line applications."""
                                        )

        self.gpu_test = DoSomething(
            "",
            "Test GPU",
            self.do_check_gpu,
            doc=f"""\
Press this button to check whether a GPU is correctly configured.

If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the 
required dependencies are not installed.

If you have multiple GPUs on your system, this button will only test the first one.
""",
        )

        self.flow_threshold = Float(
            text="Flow threshold",
            value=0.4,
            minval=0,
            doc=
            """Flow error threshold. All cells with errors below this threshold are kept. Recommended default is 0.4""",
        )

        self.dist_threshold = Float(
            text="Cell probability threshold",
            value=0.0,
            minval=0,
            doc=f"""\
Cell probability threshold (all pixels with a probability above this threshold are kept for masks). The recommended default is 0.0.""",
        )
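
# Sketch of how these two thresholds are typically passed to the Cellpose API.
# Argument names follow the cellpose package as best I recall and may vary
# between versions, so treat this as an assumption rather than a reference.
import numpy as np
from cellpose import models

img = np.random.rand(256, 256)  # stand-in grayscale image
model = models.Cellpose(model_type="nuclei")
masks, flows, styles, diams = model.eval(
    img,
    diameter=15,
    flow_threshold=0.4,      # cells with a higher flow error are discarded
    cellprob_threshold=0.0,  # pixels above this score can contribute to masks
)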
Example #29
0
    def create_settings(self):
        self.object_name = LabelSubscriber(
            "Select the input objects",
            "None",
            doc="Select the objects that you want to expand or shrink.",
        )

        self.output_object_name = LabelName(
            "Name the output objects",
            "ShrunkenNuclei",
            doc="Enter a name for the resulting objects.",
        )

        self.operation = Choice(
            "Select the operation",
            O_ALL,
            doc="""\
Choose the operation that you want to perform:

-  *{O_SHRINK_INF}:* Remove all pixels but one from filled objects.
   Thin objects with holes to loops unless the “fill” option is checked.
   Objects are never lost using this module (shrinking stops when an
   object becomes a single pixel).
-  *{O_EXPAND_INF}:* Expand objects, assigning every pixel in the
   image to an object. Background pixels are assigned to the nearest
   object.
-  *{O_DIVIDE}:* Remove pixels from an object that are adjacent to
   another object’s pixels unless doing so would change the object’s
   Euler number (break an object in two, remove the object completely or
   open a hole in an object).
-  *{O_SHRINK}:* Remove pixels around the perimeter of an object unless
   doing so would change the object’s Euler number (break the object in
   two, remove the object completely or open a hole in the object). You
   can specify the number of times perimeter pixels should be removed.
   Processing stops automatically when there are no more pixels to
   remove. Objects are never lost using this module (shrinking
   stops when an object becomes a single pixel).
-  *{O_EXPAND}:* Expand each object by adding adjacent background
   pixels. You can choose the number of times to expand.
   Processing stops automatically if there are no more background
   pixels.
-  *{O_SKELETONIZE}:* Erode each object to its skeleton.
-  *{O_SPUR}:* Remove or reduce the length of spurs in a skeletonized
   image. The algorithm reduces spur size by the number of pixels
   indicated in the setting *Number of pixels by which to expand or
   shrink*.
""".format(
                **{
                    "O_DIVIDE": O_DIVIDE,
                    "O_EXPAND": O_EXPAND,
                    "O_EXPAND_INF": O_EXPAND_INF,
                    "O_SHRINK": O_SHRINK,
                    "O_SHRINK_INF": O_SHRINK_INF,
                    "O_SKELETONIZE": O_SKELETONIZE,
                    "O_SPUR": O_SPUR,
                }),
        )

        self.iterations = Integer(
            "Number of pixels by which to expand or shrink",
            1,
            minval=1,
            doc="""\
*(Used only if "{O_SHRINK}", "{O_EXPAND}", or "{O_SPUR}" is selected)*

Specify the number of pixels to add or remove from object borders.
""".format(**{
                "O_EXPAND": O_EXPAND,
                "O_SHRINK": O_SHRINK,
                "O_SPUR": O_SPUR
            }),
        )

        self.wants_fill_holes = Binary(
            "Fill holes in objects so that all objects shrink to a single point?",
            False,
            doc="""\
*(Used only if one of the “Shrink” options is selected)*

Select *{YES}* to ensure that each object will shrink to a single
point, by filling the holes in each object.

Select *{NO}* to preserve the Euler number. In this case, the shrink
algorithm preserves each object’s Euler number, which means that it will
erode an object with a hole to a ring in order to keep the hole. An
object with two holes will be shrunk to two rings connected by a line in
order to keep from breaking up the object or breaking the hole.
""".format(**{
                "NO": "No",
                "YES": "Yes"
            }),
        )
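
# Small illustrations of two of the operations described above, using
# scipy/scikit-image equivalents (a sketch, not this module's actual code).
import numpy as np
from scipy.ndimage import distance_transform_edt
from skimage.morphology import skeletonize

labels = np.zeros((16, 16), dtype=int)
labels[2:6, 2:6] = 1
labels[9:14, 9:14] = 2

# "Expand until touching": assign every background pixel to its nearest object.
_, (row_idx, col_idx) = distance_transform_edt(labels == 0, return_indices=True)
expanded = labels[row_idx, col_idx]

# "Skeletonize": erode each object down to its skeleton.
skeleton = skeletonize(labels > 0)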
    def create_settings(self):
        """Create your settings by subclassing this function

        create_settings is called at the end of initialization.
        """
        self.grid_name = GridSubscriber(
            "Select the defined grid",
            "None",
            doc=
            """Select the name of a grid created by a previous **DefineGrid** module.""",
        )

        self.output_objects_name = LabelName(
            "Name the objects to be identified",
            "Wells",
            doc="""\
Enter the name of the grid objects identified by this module. These objects
will be available for further measurement and processing in subsequent modules.""",
        )

        self.shape_choice = Choice(
            "Select object shapes and locations",
            [
                SHAPE_RECTANGLE, SHAPE_CIRCLE_FORCED, SHAPE_CIRCLE_NATURAL,
                SHAPE_NATURAL
            ],
            doc="""\
Use this setting to choose the method to be used to determine the grid
objects’ shapes and locations:

-  *%(SHAPE_RECTANGLE)s:* Each object will be created as a rectangle,
   completely occupying the entire grid compartment. This
   option creates the rectangular objects based solely on the grid’s
   specifications, not on any previously identified guiding objects.
-  *%(SHAPE_CIRCLE_FORCED)s:* Each object will be created as a circle,
   centered in the middle of each grid compartment. This option places
   the circular objects’ locations based solely on the grid’s
   specifications, not on any previously identified guiding objects. The
   radius of all circles in a grid will be constant for the entire grid
   in each image cycle, and can be determined automatically for each
   image cycle based on the average radius of previously identified
   guiding objects for that image cycle, or instead it can be specified
   as a single radius for all circles in all grids in the entire
   analysis run.
-  *%(SHAPE_CIRCLE_NATURAL)s:* Each object will be created as a
   circle, and each circle’s location within its grid compartment will
   be determined based on the location of any previously identified
   guiding objects within that grid compartment. Thus, if a guiding
   object lies within a particular grid compartment, that object’s
   center will be the center of the created circular object. If no
   guiding objects lie within a particular grid compartment, the
   circular object is placed within the center of that grid compartment.
   If more than one guiding object lies within the grid compartment,
   they will be combined and the centroid of this combined object will
   be the location of the created circular object. Note that guiding
   objects whose centers are close to the grid edge are ignored.
-  *%(SHAPE_NATURAL)s:* Within each grid compartment, the object will
   be identified based on combining all of the parts of guiding objects,
   if any, that fall within the grid compartment. Note that guiding
   objects whose centers are close to the grid edge are ignored. If a
   guiding object does not exist within a grid compartment, an object
   consisting of one single pixel in the middle of the grid compartment
   will be created.
""" % globals(),
        )

        self.diameter_choice = Choice(
            "Specify the circle diameter automatically?",
            [AM_AUTOMATIC, AM_MANUAL],
            doc="""\
*(Used only if "Circle" is selected as object shape)*

There are two methods for selecting the circle diameter:

-  *%(AM_AUTOMATIC)s:* Uses the average diameter of previously
   identified guiding objects as the diameter.
-  *%(AM_MANUAL)s:* Lets you specify the diameter directly, as a
   number.
""" % globals(),
        )

        self.diameter = Integer(
            "Circle diameter",
            20,
            minval=2,
            doc="""\
*(Used only if "Circle" is selected as object shape and diameter is
specified manually)*

Enter the diameter to be used for each grid circle, in pixels.
{dist}
""".format(dist=HELP_ON_MEASURING_DISTANCES),
        )

        self.guiding_object_name = LabelSubscriber(
            "Select the guiding objects",
            "None",
            doc="""\
*(Used only if "Circle" is selected as object shape and diameter is
specified automatically, or if "Natural Location" is selected as the
object shape)*

Select the names of previously identified objects that will be used to
guide the shape and/or location of the objects created by this module,
depending on the method chosen.
""",
        )
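
# Sketch of the "forced-location circle" option described above: stamp a circle
# of a fixed diameter at the center of each grid compartment. The grid geometry
# here is invented purely for illustration.
import numpy as np
from skimage.draw import disk

grid_rows, grid_cols = 4, 6    # compartments per grid (assumed)
cell_h, cell_w = 50, 50        # compartment size in pixels (assumed)
diameter = 20

labels = np.zeros((grid_rows * cell_h, grid_cols * cell_w), dtype=int)
label = 1
for r in range(grid_rows):
    for c in range(grid_cols):
        center = (r * cell_h + cell_h // 2, c * cell_w + cell_w // 2)
        rr, cc = disk(center, diameter / 2, shape=labels.shape)
        labels[rr, cc] = label
        label += 1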