Code example #1
File: resize.py  Project: yaweiyang-sz/CellProfiler
    def add_image(self, can_remove=True):
        group = SettingsGroup()

        if can_remove:
            group.append("divider", Divider(line=False))

        group.append(
            "input_image_name",
            ImageSubscriber(
                "Select the additional image?",
                "None",
                doc="""\
What is the name of the additional image to resize? This image will be
resized with the same settings as the first image.""",
            ),
        )

        group.append(
            "output_image_name",
            ImageName(
                "Name the output image",
                "ResizedBlue",
                doc="What is the name of the additional resized image?",
            ),
        )

        if can_remove:
            group.append(
                "remover",
                RemoveSettingButton(
                    "", "Remove above image", self.additional_images, group
                ),
            )

        self.additional_images.append(group)
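
Not shown in the excerpt above: modules that collect per-image SettingsGroup entries like this usually also flatten them in settings() so every value can be written to and read back from the pipeline file. A minimal sketch under that assumption; the fixed setting names self.image_name and self.resized_image_name are placeholders, not taken from resize.py.

    def settings(self):
        # Fixed settings first, then one (input, output) pair per added group.
        result = [self.image_name, self.resized_image_name]
        for group in self.additional_images:
            result += [group.input_image_name, group.output_image_name]
        return result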
Code example #2
File: morph.py  Project: yaweiyang-sz/CellProfiler
    def create_settings(self):
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="""\
Select the image that you want to perform a morphological operation on.
A grayscale image can be converted to binary using the **Threshold**
module. Objects can be converted to binary using the **ConvertToImage**
module.""",
        )

        self.output_image_name = ImageName(
            "Name the output image",
            "MorphBlue",
            doc="""Enter the name for the output image. It will be of the same type as the input image.""",
        )

        self.add_button = DoSomething(
            "",
            "Add another operation",
            self.add_function,
            doc="""\
Press this button to add an operation that will be applied to the
image resulting from the previous operation(s). The module repeats
the previous operation the number of times you select before applying
the operation added by this button.""",
        )

        self.functions = []
        self.add_function(can_remove=False)
Code example #3
    def add_channel(self, can_remove=True):
        """Add another channel to the channels list"""
        group = SettingsGroup()
        group.can_remove = can_remove
        group.append(
            "channel_choice",
            Integer(
                text="Channel number",
                value=len(self.channels) + 1,
                minval=1,
                doc="""\
*(Used only when splitting images)*

This setting chooses a channel to be processed. For example, *1*
is the first
channel in a .TIF or the red channel in a traditional image file.
*2* and *3* are the second and third channels of a TIF or
the green and blue channels in other formats. *4* is the
transparency channel for image formats that support transparency and is
channel # 4 for a .TIF file. **ColorToGray** will fail to process an
image if you select a channel that is not supported by that image, for
example, “5” for a three-channel .PNG file.""",
            ),
        )

        group.append(
            "contribution",
            Float(
                "Relative weight of the channel",
                1,
                0,
                doc="""\
*(Used only when combining channels)*

Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.""",
            ),
        )

        group.append(
            "image_name",
            ImageName(
                "Image name",
                value="Channel%d" % (len(self.channels) + 1),
                doc="""\
*(Used only when splitting images)*

Select the name of the output grayscale image.""",
            ),
        )

        if group.can_remove:
            group.append(
                "remover",
                RemoveSettingButton("", "Remove this channel", self.channels, group),
            )
        self.channels.append(group)
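
A hedged companion sketch, not part of the excerpt: because the number of channel groups varies, modules like this typically rebuild the list in prepare_settings() when a saved pipeline is loaded, so the stored values line up with the recreated settings. N_FIXED_SETTINGS and SETTINGS_PER_CHANNEL are assumed placeholders for the module's real counts.

    def prepare_settings(self, setting_values):
        # Work out how many channel groups the saved pipeline contains.
        n_channels = (len(setting_values) - N_FIXED_SETTINGS) // SETTINGS_PER_CHANNEL
        # Trim or extend self.channels so the loaded values map one-to-one.
        del self.channels[n_channels:]
        while len(self.channels) < n_channels:
            self.add_channel()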
Code example #4
    def create_settings(self):
        self.object_name = LabelSubscriber(
            "Select the input objects",
            "None",
            doc=
            "Choose the name of the objects you want to convert to an image.",
        )

        self.image_name = ImageName(
            "Name the output image",
            "CellImage",
            doc="Enter the name of the resulting image.",
        )

        self.image_mode = Choice(
            "Select the color format",
            ["Color", "Binary (black & white)", "Grayscale", "uint16"],
            doc="""\
Select which colors the resulting image should use. You have the
following options:

-  *Color:* Allows you to choose a colormap that will produce jumbled
   colors for your objects.
-  *Binary (black & white):* All object pixels will be assigned 1 and
   all background pixels will be assigned 0, creating a binary image.
-  *Grayscale:* Assigns all background pixels to 0 and assigns each object's pixels with a number 
   specific to that object. Object numbers can range from 1 to 255 (the maximum value that you can put
   in an 8-bit integer; use **uint16** if you expect more than 255 objects).
   This creates an image where objects in the top left corner of the image are
   very dark and the colors progress to white toward the bottom right corner of the image.
   Use **SaveImages** to save the resulting image as a .npy file or .tiff file if you want
   to process the label matrix image using another program or in a separate CellProfiler pipeline.
-  *uint16:* Assigns all background pixels to 0 and assigns each object's pixels with a number 
   specific to that object. Object numbers can range from 1 to 65535 (the maximum value that you can put
   in a 16-bit integer). This creates an image where objects in the top left corner of the image are
   very dark and where the colors progress to white toward the bottom right corner of the image
   (though this can usually only be seen in a scientific image viewer since standard image viewers only
   handle 8-bit images). Use **SaveImages** to save the resulting image as a .npy file or
   **16-bit** (not 8-bit!) .tiff file if you want to process the label matrix image using another
   program or in a separate CellProfiler pipeline.

You can choose *Color* with a *Gray* colormap to produce jumbled gray
objects.
            """,
        )

        self.colormap = Colormap(
            "Select the colormap",
            doc="""\
*(Used only if "Color" output image selected)*

Choose the colormap to be used, which affects how the objects are
colored. You can look up your default colormap under *File >
Preferences*.
""",
        )
Code example #5
    def create_settings(self):
        self.input_image_name = ImageSubscriber(
            "Image", doc="Select the image you want to use.")

        self.template_name = Pathname(
            "Template",
            doc=
            "Specify the location of the cropped image you want to use as a template.",
        )

        self.output_image_name = ImageName(
            "Output",
            doc=
            "Enter the name you want to call the image produced by this module.",
        )
Code example #6
    def add_image_out(self, can_delete=True):
        """Add an image to the image_groups collection
        can_delete - set this to False to keep from showing the "remove"
                     button for images that must be present.
        """
        group = SettingsGroup()
        if can_delete:
            group.append("divider", Divider(line=False))
        group.append(
            "input_filename",
            Text(
                "What is the image filename CellProfiler should load?",
                "None.tiff",
                doc=
                "Enter the image filename CellProfiler should load. This should be set to the output filename "
                "written in the macro file. The image written by the macro will be saved in a temporary directory "
                "and read by CellProfiler."),
        )

        group.append(
            "image_name",
            ImageName(
                r'What should CellProfiler call the loaded image?',
                "None",
                doc=
                'Enter a name to assign to the new image loaded by CellProfiler. This image will be added to your '
                'workspace. '))

        if len(self.image_groups_out) == 0:  # Insert space between 1st two images for aesthetics
            group.append("extra_divider", Divider(line=False))

        if can_delete:
            group.append(
                "remover",
                RemoveSettingButton("", "Remove this image",
                                    self.image_groups_out, group))

        self.image_groups_out.append(group)
Code example #7
    def create_settings(self):
        self.synapsin_image = ImageSubscriber(
            "Select the synapsin image", "None", doc="""\
Select the image of the synapsin-1 channel.""")

        self.PSD95_image = ImageSubscriber(
            "Select the PSD95 image", "None", doc="""\
Select the image of the PSD95 channel.""")

        self.vGlut_image = ImageSubscriber(
            "Select the vGlut image", "None", doc="""\
Select the image of the vGlut channel.""")

        self.prediction_image_name = ImageName(
            "Output image name",
            "SynapsePrediction",
            doc="""\
Enter the name to give the output prediction image created by this module.
""")
        self.t7_name = Pathname(
            "Trained network location",
            doc="Specify the location of the trained network."
        )
Code example #8
    def create_settings(self):
        super(RunCellpose, self).create_settings()

        self.expected_diameter = Integer(
            text="Expected object diameter",
            value=15,
            minval=0,
            doc="""\
The average diameter of the objects to be detected. Setting this to 0 will attempt to automatically detect object size.
Note that automatic diameter mode does not work when running on 3D images.

Cellpose models come with a pre-defined object diameter. Your image will be resized during detection to attempt to 
match the diameter expected by the model. The default models have an expected diameter of ~16 pixels; if trying to
detect much smaller objects it may be more efficient to resize the image first using the Resize module.
""",
        )

        self.mode = Choice(
            text="Detection mode",
            choices=[MODE_NUCLEI, MODE_CELLS, MODE_CUSTOM],
            value=MODE_NUCLEI,
            doc="""\
CellPose comes with models for detecting nuclei or cells. Alternatively, you can supply a custom-trained model 
generated using the command line or Cellpose GUI. Custom models can be useful if working with unusual cell types.
""",
        )

        self.use_gpu = Binary(text="Use GPU",
                              value=False,
                              doc=f"""\
If enabled, Cellpose will attempt to run detection on your system's graphics card (GPU). 
Note that you will need a CUDA-compatible GPU and correctly configured PyTorch version, see this link for details: 
{CUDA_LINK}

If disabled or incorrectly configured, Cellpose will run on your CPU instead. This is much slower but more compatible 
with different hardware setups.

Note that, particularly when in 3D mode, lack of GPU memory can become a limitation. If a model crashes you may need to 
re-start CellProfiler to release GPU memory. Resizing large images prior to running them through the model can free up 
GPU memory.

""")

        self.use_averaging = Binary(text="Use averaging",
                                    value=True,
                                    doc="""\
If enabled, CellPose will run its 4 inbuilt models and take a consensus to determine the results. If disabled, only a 
single model will be called to produce results. Disabling averaging is faster to run but less accurate."""
                                    )

        self.supply_nuclei = Binary(text="Supply nuclei image as well?",
                                    value=False,
                                    doc="""
When detecting whole cells, you can provide a second image featuring a nuclear stain to assist 
the model with segmentation. This can help to split touching cells.""")

        self.nuclei_image = ImageSubscriber(
            "Select the nuclei image",
            doc="Select the image you want to use as the nuclear stain.")

        self.save_probabilities = Binary(
            text="Save probability image?",
            value=False,
            doc="""
If enabled, the probability scores from the model will be recorded as a new image. 
Probability >0 is considered as being part of a cell. 
You may want to use a higher threshold to manually generate objects.""",
        )

        self.probabilities_name = ImageName(
            "Name the probability image",
            "Probabilities",
            doc=
            "Enter the name you want to call the probability image produced by this module.",
        )

        self.model_directory = Directory(
            "Location of the pre-trained model file",
            doc=f"""\
*(Used only when using a custom pre-trained model)*

Select the location of the pre-trained CellPose model file that will be used for detection."""
        )

        def get_directory_fn():
            """Get the directory for the rules file name"""
            return self.model_directory.get_absolute_path()

        def set_directory_fn(path):
            dir_choice, custom_path = self.model_directory.get_parts_from_path(
                path)

            self.model_directory.join_parts(dir_choice, custom_path)

        self.model_file_name = Filename("Pre-trained model file name",
                                        "cyto_0",
                                        get_directory_fn=get_directory_fn,
                                        set_directory_fn=set_directory_fn,
                                        doc=f"""\
*(Used only when using a custom pre-trained model)*

This file can be generated by training a custom model within the CellPose GUI or command line applications."""
                                        )

        self.gpu_test = DoSomething(
            "",
            "Test GPU",
            self.do_check_gpu,
            doc=f"""\
Press this button to check whether a GPU is correctly configured.

If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the 
required dependencies are not installed.

If you have multiple GPUs on your system, this button will only test the first one.
""",
        )

        self.flow_threshold = Float(
            text="Flow threshold",
            value=0.4,
            minval=0,
            doc=
            """Flow error threshold. All cells with errors below this threshold are kept. Recommended default is 0.4""",
        )

        self.dist_threshold = Float(
            text="Cell probability threshold",
            value=0.0,
            minval=0,
            doc=f"""\
Cell probability threshold (all pixels with probability above threshold kept for masks). Recommended default is 0.0. """,
        )
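
Several of the docstrings above say a setting is "(Used only when ...)"; in CellProfiler plugins that conditionality is normally implemented in visible_settings(). A minimal sketch assuming the attribute names defined above; the real RunCellpose method may order things differently and also includes the base-class settings created by super().create_settings(), which are omitted here.

    def visible_settings(self):
        visible = [self.mode, self.expected_diameter]
        if self.mode.value == MODE_CUSTOM:
            # Custom models need a location and a file name.
            visible += [self.model_directory, self.model_file_name]
        if self.mode.value == MODE_CELLS:
            visible += [self.supply_nuclei]
            if self.supply_nuclei.value:
                visible += [self.nuclei_image]
        visible += [self.use_averaging, self.use_gpu]
        if self.use_gpu.value:
            visible += [self.gpu_test]
        visible += [self.save_probabilities]
        if self.save_probabilities.value:
            visible += [self.probabilities_name]
        visible += [self.flow_threshold, self.dist_threshold]
        return visible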
Code example #9
File: smooth.py  Project: yaweiyang-sz/CellProfiler
    def create_settings(self):
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="""Select the image to be smoothed.""",
        )

        self.filtered_image_name = ImageName(
            "Name the output image",
            "FilteredImage",
            doc="""Enter a name for the resulting image.""",
        )

        self.smoothing_method = Choice(
            "Select smoothing method",
            [
                FIT_POLYNOMIAL,
                GAUSSIAN_FILTER,
                MEDIAN_FILTER,
                SMOOTH_KEEPING_EDGES,
                CIRCULAR_AVERAGE_FILTER,
                SM_TO_AVERAGE,
            ],
            doc="""\
This module smooths images using one of several filters. Fitting a
polynomial is fastest but does not allow a very tight fit compared to
the other methods:

-  *%(FIT_POLYNOMIAL)s:* This method is fastest but does not allow
   a very tight “fit” compared to the other methods. Thus, it will usually be less
   accurate. The method treats the intensity of the image
   pixels as a polynomial function of the x and y position of each
   pixel. It fits the intensity to the polynomial, *A x* :sup:`2` *+ B
   y* :sup:`2` *+ C xy + D x + E y + F*. This will produce a smoothed
   image with a single peak or trough of intensity that tapers off
   elsewhere in the image. For many microscopy images (where the
   illumination of the lamp is brightest in the center of field of
   view), this method will produce an image with a bright central region
   and dimmer edges. But, in some cases the peak/trough of the
   polynomial may actually occur outside of the image itself.
-  *%(GAUSSIAN_FILTER)s:* This method convolves the image with a
   Gaussian whose full width at half maximum is the artifact diameter
   entered. Its effect is to blur and obscure features smaller than the
   specified diameter and spread bright or dim features larger than the
   specified diameter.
-  *%(MEDIAN_FILTER)s:* This method finds the median pixel value within
   the diameter you specify. It removes bright or dim features
   that are significantly smaller than the specified diameter.
-  *%(SMOOTH_KEEPING_EDGES)s:* This method uses a bilateral filter
   which limits Gaussian smoothing across an edge while applying
   smoothing perpendicular to an edge. The effect is to respect edges in
   an image while smoothing other features. *%(SMOOTH_KEEPING_EDGES)s*
   will filter an image with reasonable speed for artifact diameters
   greater than 10 and for intensity differences greater than 0.1. The
   algorithm will consume more memory and operate more slowly as you
   lower these numbers.
-  *%(CIRCULAR_AVERAGE_FILTER)s:* This method convolves the image with
   a uniform circular averaging filter whose size is the artifact
   diameter entered. This filter is useful for re-creating an
   out-of-focus blur to an image.
-  *%(SM_TO_AVERAGE)s:* Creates a flat, smooth image where every pixel
   of the image equals the average value of the original image.

*Note, when deciding between %(MEDIAN_FILTER)s and %(GAUSSIAN_FILTER)s
we typically recommend
%(MEDIAN_FILTER)s over %(GAUSSIAN_FILTER)s because the
median is less sensitive to outliers, although the results are also
slightly less smooth and the fact that images are in the range of 0
to 1 means that outliers typically will not dominate too strongly
anyway.*
"""
            % globals(),
        )

        self.wants_automatic_object_size = Binary(
            "Calculate artifact diameter automatically?",
            True,
            doc="""\
*(Used only if “%(GAUSSIAN_FILTER)s”, “%(MEDIAN_FILTER)s”, “%(SMOOTH_KEEPING_EDGES)s” or “%(CIRCULAR_AVERAGE_FILTER)s” is selected)*

Select *Yes* to choose an artifact diameter based on the size of
the image. The minimum size it will choose is 30 pixels, otherwise the
size is 1/40 of the size of the image.

Select *No* to manually enter an artifact diameter.
"""
            % globals(),
        )

        self.object_size = Float(
            "Typical artifact diameter",
            16.0,
            doc="""\
*(Used only if choosing the artifact diameter automatically is set to
“No”)*

Enter the approximate diameter (in pixels) of the features to be blurred
by the smoothing algorithm. This value is used to calculate the size of
the spatial filter. {} For most
smoothing methods, selecting a diameter over ~50 will take substantial
amounts of time to process.
""".format(
                HELP_ON_MEASURING_DISTANCES
            ),
        )

        self.sigma_range = Float(
            "Edge intensity difference",
            0.1,
            doc="""\
*(Used only if “{smooth_help}” is selected)*

Enter the intensity step (which indicates an edge in an image) that you
want to preserve. Edges are locations where the intensity changes
precipitously, so this setting is used to adjust the rough magnitude of
these changes. A lower number will preserve weaker edges. A higher
number will preserve only stronger edges. Values should be between zero
and one. {pixel_help}
""".format(
                smooth_help=SMOOTH_KEEPING_EDGES, pixel_help=HELP_ON_PIXEL_INTENSITIES
            ),
        )

        self.clip = Binary(
            "Clip intensities to 0 and 1?",
            True,
            doc="""\
*(Used only if "{fit}" is selected)*

The *{fit}* method is the only smoothing option that can
yield an output image whose values are outside of the values of the
input image. This setting controls whether to limit the image
intensity to the 0 - 1 range used by CellProfiler.

Select *Yes* to set all output image pixels less than zero to zero
and all pixels greater than one to one.

Select *No* to allow values less than zero and greater than one in
the output image.
""".format(
                fit=FIT_POLYNOMIAL
            ),
        )
Code example #10
File: definegrid.py  Project: zwdiscover/CellProfiler
    def create_settings(self):
        """Create your settings by subclassing this function

        create_settings is called at the end of initialization.
        """
        self.grid_image = GridName(
            "Name the grid",
            doc="""\
This is the name of the grid. You can use this name to
retrieve the grid in subsequent modules.""",
        )

        self.grid_rows = Integer(
            "Number of rows",
            8,
            1,
            doc="""Along the height of the grid, define the number of rows.""",
        )

        self.grid_columns = Integer(
            "Number of columns",
            12,
            1,
            doc=
            """Along the width of the grid, define the number of columns.""",
        )

        self.origin = Choice(
            "Location of the first spot",
            [NUM_TOP_LEFT, NUM_BOTTOM_LEFT, NUM_TOP_RIGHT, NUM_BOTTOM_RIGHT],
            doc="""\
Grid cells are numbered consecutively; this option identifies the
origin for the numbering system and the direction for numbering.
For instance, if you choose "*%(NUM_TOP_LEFT)s*", the top left cell is
cell #1 and cells to the right and bottom are indexed with
larger numbers.""" % globals(),
        )

        self.ordering = Choice(
            "Order of the spots",
            [NUM_BY_ROWS, NUM_BY_COLUMNS],
            doc="""\
Grid cells can either be numbered by rows, then columns or by columns,
then rows. For instance, if you asked to start numbering a 96-well
plate at the top left (by specifying the location of the first spot), then:

-  *%(NUM_BY_ROWS)s:* this option will give well A01 the index 1, B01
   the index 2, and so on up to H01 which receives the index 8. Well A02
   will be assigned the index 9.
-  *%(NUM_BY_COLUMNS)s:* with this option, the well A02 will be
   assigned 2, well A12 will be assigned 12 and well B01 will be
   assigned 13.
""" % globals(),
        )

        self.each_or_once = Choice(
            "Define a grid for which cycle?",
            [EO_EACH, EO_ONCE],
            doc="""\
This setting allows you to choose when you want to define a new grid:

-  *%(EO_ONCE)s:* If all of your images are perfectly aligned with each
   other (due to very consistent image acquisition, consistent grid
   location within the plate, and/or automatic cropping precisely within
   each plate), you can define the location of the marker spots once for
   all of the image cycles.
-  *%(EO_EACH)s:* If the location of the grid will vary from one image
   cycle to the next then you should define the location of the marker
   spots for each cycle independently.
""" % globals(),
        )

        self.auto_or_manual = Choice(
            "Select the method to define the grid",
            [AM_AUTOMATIC, AM_MANUAL],
            doc="""\
Select whether you would like to define the grid automatically (based on
objects you have identified in a previous module) or manually. This
setting controls how the grid is defined:

-  *%(AM_MANUAL)s:* In manual mode, you manually indicate known
   locations of marker spots in the grid and have the rest of the
   positions calculated from those marks, no matter what the image
   itself looks like. You can define the grid either by clicking on the
   image with a mouse or by entering coordinates.
-  *%(AM_AUTOMATIC)s:* If you would like the grid to be defined
   automatically, an **IdentifyPrimaryObjects** module must be run prior
   to this module to identify the objects that will be used to define
   the grid. The left-most, right-most, top-most, and bottom-most object
   will be used to define the edges of the grid, and the rows and
   columns will be evenly spaced between these edges. Note that
   Automatic mode requires that the incoming objects are nicely defined:
   for example, if there is an object at the edge of the images that is
   not really an object that ought to be in the grid, a skewed grid will
   result. You might wish to use a **FilterObjects** module to clean up
   badly identified objects prior to defining the grid. If the spots are
   slightly out of alignment with each other from one image cycle to the
   next, this allows the identification to be a bit flexible and adapt
   to the real location of the spots.
""" % globals(),
        )

        self.object_name = LabelSubscriber(
            "Select the previously identified objects",
            "None",
            doc="""\
*(Used only if you selected "%(AM_AUTOMATIC)s" to define the grid)*

Select the previously identified objects you want to use to define the
grid. Use this setting to specify the name of the objects that will be
used to define the grid.
""" % globals(),
        )

        self.manual_choice = Choice(
            "Select the method to define the grid manually",
            [MAN_MOUSE, MAN_COORDINATES],
            doc="""\
*(Used only if you selected "%(AM_MANUAL)s" to define the grid)*

Specify whether you want to define the grid using the mouse or by
entering the coordinates of the cells.

-  *%(MAN_MOUSE)s:* The user interface displays the image you specify.
   You will be asked to click in the center of two of the grid cells and
   specify the row and column for each. The grid coordinates will be
   computed from this information.
-  *%(MAN_COORDINATES)s:* Enter the X and Y coordinates of the grid
   cells directly. You can display an image of your grid to find the
   locations of the centers of the cells, then enter the X and Y
   position and cell coordinates for each of two cells.
""" % globals(),
        )

        self.manual_image = ImageSubscriber(
            "Select the image to display when drawing",
            "None",
            doc="""\
*(Used only if you selected "%(AM_MANUAL)s" and "%(MAN_MOUSE)s" to define
the grid)*

Specify the image you want to display when defining the grid. This
setting lets you choose the image to display in the grid definition user
interface.
""" % globals(),
        )

        self.first_spot_coordinates = Coordinates(
            "Coordinates of the first cell",
            (0, 0),
            doc="""\
*(Used only if you selected "%(AM_MANUAL)s" and "%(MAN_COORDINATES)s" to
define the grid)*

Enter the coordinates of the first cell on your grid. This setting
defines the location of the first of two cells in your grid. You should
enter the coordinates of the center of the cell. You can display an
image of your grid and use the pixel coordinate display to determine the
coordinates of the center of your cell.
""" % globals(),
        )

        self.first_spot_row = Integer(
            "Row number of the first cell",
            1,
            minval=1,
            doc="""\
*(Used only if you selected "%(AM_MANUAL)s" and "%(MAN_COORDINATES)s" to
define the grid)*

Enter the row index for the first cell here. Rows are numbered starting
at the origin. For instance, if you chose "*%(NUM_TOP_LEFT)s*" as your
origin, well A01 will be row number 1 and H01 will be row number 8. If
you chose "*%(NUM_BOTTOM_LEFT)s*", A01 will be row number 8 and H01 will
be row number 1.
""" % globals(),
        )

        self.first_spot_col = Integer(
            "Column number of the first cell",
            1,
            minval=1,
            doc="""\
*(Used only if you selected "%(AM_MANUAL)s" and "%(MAN_COORDINATES)s" to
define the grid)*

Enter the column index for the first cell here. Columns are numbered
starting at the origin. For instance, if you chose "*%(NUM_TOP_LEFT)s*"
as your origin, well A01 will be column number *1* and A12 will be
column number *12*. If you chose "*%(NUM_TOP_RIGHT)s*", A01 and A12 will
be *12* and *1*, respectively.
""" % globals(),
        )

        self.second_spot_coordinates = Coordinates(
            "Coordinates of the second cell",
            (0, 0),
            doc="""\
*(Used only if you selected "%(AM_MANUAL)s" and "%(MAN_COORDINATES)s" to
define the grid)*

This setting defines the location of the second of two cells in your
grid. You should enter the coordinates of the center of the cell. You
can display an image of your grid and use the pixel coordinate
display to determine the coordinates (X,Y) of the center of your cell.
""" % globals(),
        )

        self.second_spot_row = Integer(
            "Row number of the second cell",
            1,
            minval=1,
            doc="""\
*(Used only if you selected "%(AM_MANUAL)s" and "%(MAN_COORDINATES)s" to
define the grid)*

Enter the row index for the second cell here. Rows are numbered starting
at the origin. For instance, if you chose "*%(NUM_TOP_LEFT)s*" as your
origin, well A01 will be row number 1 and H01 will be row number 8. If
you chose "*%(NUM_BOTTOM_LEFT)s*", A01 will be row number 8 and H01 will
be row number 1.
""" % globals(),
        )

        self.second_spot_col = Integer(
            "Column number of the second cell",
            1,
            minval=1,
            doc="""\
*(Used only if you selected "%(AM_MANUAL)s" and "%(MAN_COORDINATES)s" to
define the grid)*

Enter the column index for the second cell here. Columns are numbered
starting at the origin. For instance, if you chose "*%(NUM_TOP_LEFT)s*"
as your origin, well A01 will be column number 1 and A12 will be column
number 12. If you chose "*%(NUM_TOP_RIGHT)s*", A01 and A12 will be 12
and 1, respectively.
""" % globals(),
        )

        self.wants_image = Binary(
            "Retain an image of the grid?",
            False,
            doc="""\
Select "*Yes*" to retain an image of the grid for use later in the
pipeline. This module can create an annotated image of the grid that can
be saved using the **SaveImages** module.
""" % globals(),
        )

        self.display_image_name = ImageSubscriber(
            "Select the image on which to display the grid",
            "Leave blank",
            can_be_blank=True,
            doc="""\
*(Used only if saving an image of the grid)*

Enter the name of the image that should be used as the background for
annotations (grid lines and grid indexes). This image will be used for
the figure and for the saved image.
""",
        )

        self.save_image_name = ImageName(
            "Name the output image",
            "Grid",
            doc="""\
*(Used only if retaining an image of the grid for use later in the
pipeline)*

Enter the name you want to use for the output image. You can save this
image using the **SaveImages** module.
""",
        )

        self.failed_grid_choice = Choice(
            "Use a previous grid if gridding fails?",
            [FAIL_NO, FAIL_ANY_PREVIOUS, FAIL_FIRST],
            doc="""\
If the gridding fails, this setting allows you to control how the module
responds to the error:

-  *%(FAIL_NO)s:* The module will stop the pipeline if gridding fails.
-  *%(FAIL_ANY_PREVIOUS)s:* The module will use the most recent
   successful gridding.
-  *%(FAIL_FIRST)s:* The module will use the first gridding.

Note that the pipeline will stop in all cases if gridding fails on the
first image.
""" % globals(),
        )
Code example #11
    def add_image(self, can_remove=True):
        group = SettingsGroup()
        group.can_remove = can_remove
        if can_remove:
            group.append("divider", Divider())
        idx = len(self.outputs)
        default_name = STAINS_BY_POPULARITY[idx % len(STAINS_BY_POPULARITY)]
        default_name = default_name.replace(" ", "")

        group.append(
            "image_name",
            ImageName(
                "Name the output image",
                default_name,
                doc="""\
Use this setting to name one of the images produced by the
module for a particular stain. The image can be used in
subsequent modules in the pipeline.
""",
            ),
        )

        choices = list(sorted(STAIN_DICTIONARY.keys())) + [CHOICE_CUSTOM]

        group.append(
            "stain_choice",
            Choice(
                "Stain",
                choices=choices,
                doc="""\
Use this setting to choose the absorbance values for a particular stain.

The stains are:

|Unmix_image0|

(Information taken from `here`_,
`here <http://en.wikipedia.org/wiki/Staining>`__, and
`here <http://stainsfile.info>`__.)
You can choose *{CHOICE_CUSTOM}* and enter your custom values for the
absorbance (or use the estimator to determine values from single-stain
images).

.. _here: http://en.wikipedia.org/wiki/Histology#Staining
.. |Unmix_image0| image:: {UNMIX_COLOR_CHART}

""".format(
                    **{
                        "UNMIX_COLOR_CHART":
                        cellprofiler.gui.help.content.image_resource(
                            "UnmixColors.png"),
                        "CHOICE_CUSTOM":
                        CHOICE_CUSTOM,
                    }),
            ),
        )

        group.append(
            "red_absorbance",
            Float(
                "Red absorbance",
                0.5,
                0,
                1,
                doc="""\
*(Used only if "%(CHOICE_CUSTOM)s" is selected for the stain)*

The red absorbance setting estimates the dye’s absorbance of light in
the red channel. You should enter a value between 0 and 1 where 0 is no
absorbance and 1 is complete absorbance. You can use the estimator to
calculate this value automatically.
""" % globals(),
            ),
        )

        group.append(
            "green_absorbance",
            Float(
                "Green absorbance",
                0.5,
                0,
                1,
                doc="""\
*(Used only if "%(CHOICE_CUSTOM)s" is selected for the stain)*

The green absorbance setting estimates the dye’s absorbance of light in
the green channel. You should enter a value between 0 and 1 where 0 is
no absorbance and 1 is complete absorbance. You can use the estimator to
calculate this value automatically.
""" % globals(),
            ),
        )

        group.append(
            "blue_absorbance",
            Float(
                "Blue absorbance",
                0.5,
                0,
                1,
                doc="""\
*(Used only if "%(CHOICE_CUSTOM)s" is selected for the stain)*

The blue absorbance setting estimates the dye’s absorbance of light in
the blue channel. You should enter a value between 0 and 1 where 0 is no
absorbance and 1 is complete absorbance. You can use the estimator to
calculate this value automatically.
""" % globals(),
            ),
        )

        def on_estimate():
            result = self.estimate_absorbance()
            if result is not None:
                (
                    group.red_absorbance.value,
                    group.green_absorbance.value,
                    group.blue_absorbance.value,
                ) = result

        group.append(
            "estimator_button",
            DoSomething(
                "Estimate absorbance from image",
                "Estimate",
                on_estimate,
                doc="""\
Press this button to load an image of a sample stained only with the dye
of interest. **UnmixColors** will estimate appropriate red, green and
blue absorbance values from the image.
            """,
            ),
        )

        if can_remove:
            group.append(
                "remover",
                RemoveSettingButton("", "Remove this image", self.outputs,
                                    group),
            )
        self.outputs.append(group)
Code example #12
    def add_single_measurement(self, can_delete=True):
        """Add a single measurement to the group of single measurements

        can_delete - True to include a "remove" button, False if you're not
                     allowed to remove it.
        """
        group = SettingsGroup()
        if can_delete:
            group.append("divider", Divider(line=True))

        group.append(
            "object_name",
            LabelSubscriber(
                "Select the object to be classified",
                "None",
                doc="""\
The name of the objects to be classified. You can choose from objects
created by any previous module. See **IdentifyPrimaryObjects**,
**IdentifySecondaryObjects**, **IdentifyTertiaryObjects**, or **Watershed**
""",
            ),
        )

        def object_fn():
            return group.object_name.value

        group.append(
            "measurement",
            Measurement(
                "Select the measurement to classify by",
                object_fn,
                doc="""\
*(Used only if using a single measurement)*

Select a measurement made by a previous module. The objects will be
classified according to their values for this measurement.
""",
            ),
        )

        group.append(
            "bin_choice",
            Choice(
                "Select bin spacing",
                [BC_EVEN, BC_CUSTOM],
                doc="""\
*(Used only if using a single measurement)*

Select how you want to define the spacing of the bins. You have the
following options:

-  *%(BC_EVEN)s:* Choose this if you want to specify bins of equal
   size, bounded by upper and lower limits. If you want two bins, choose
   this option and then provide a single threshold when asked.
-  *%(BC_CUSTOM)s:* Choose this option to create the indicated number
   of bins at evenly spaced intervals between the low and high
   threshold. You also have the option to create bins for objects that
   fall below or above the low and high threshold.
""" % globals(),
            ),
        )

        group.append(
            "bin_count",
            Integer(
                "Number of bins",
                3,
                minval=1,
                doc="""\
*(Used only if using a single measurement)*

This is the number of bins that will be created between
the low and high threshold""",
            ),
        )

        group.append(
            "low_threshold",
            Float(
                "Lower threshold",
                0,
                doc="""\
*(Used only if using a single measurement and "%(BC_EVEN)s" selected)*

This is the threshold that separates the lowest bin from the others. The
lower threshold, upper threshold, and number of bins define the
thresholds of bins between the lowest and highest.
""" % globals(),
            ),
        )

        group.append(
            "wants_low_bin",
            Binary(
                "Use a bin for objects below the threshold?",
                False,
                doc="""\
*(Used only if using a single measurement)*

Select "*Yes*" if you want to create a bin for objects whose values
fall below the low threshold. Select "*No*" if you do not want a bin
for these objects.
""" % globals(),
            ),
        )

        group.append(
            "high_threshold",
            Float(
                "Upper threshold",
                1,
                doc="""\
*(Used only if using a single measurement and "%(BC_EVEN)s" selected)*

This is the threshold that separates the last bin from the others. Note
that if you would like two bins, you should select "*%(BC_CUSTOM)s*".
""" % globals(),
            ),
        )

        group.append(
            "wants_high_bin",
            Binary(
                "Use a bin for objects above the threshold?",
                False,
                doc="""\
*(Used only if using a single measurement)*

Select "*Yes*" if you want to create a bin for objects whose values
are above the high threshold.

Select "*No*" if you do not want a bin for these objects.
""" % globals(),
            ),
        )

        group.append(
            "custom_thresholds",
            Text(
                "Enter the custom thresholds separating the values between bins",
                "0,1",
                doc="""\
*(Used only if using a single measurement and "%(BC_CUSTOM)s" selected)*

This setting establishes the threshold values for the bins. You should
enter one threshold between each bin, separating thresholds with commas
(for example, *0.3, 1.5, 2.1* for four bins). The module will create one
more bin than there are thresholds.
""" % globals(),
            ),
        )

        group.append(
            "wants_custom_names",
            Binary(
                "Give each bin a name?",
                False,
                doc="""\
*(Used only if using a single measurement)*

Select "*Yes*" to assign custom names to bins you have specified.

Select "*No*" for the module to automatically assign names based on
the measurements and the bin number.
""" % globals(),
            ),
        )

        group.append(
            "bin_names",
            Text(
                "Enter the bin names separated by commas",
                "None",
                doc="""\
*(Used only if "Give each bin a name?" is checked)*

Enter names for each of the bins, separated by commas.
An example including three bins might be *First,Second,Third*.""",
            ),
        )

        group.append(
            "wants_images",
            Binary(
                "Retain an image of the classified objects?",
                False,
                doc="""\
Select "*Yes*" to keep an image of the objects which is color-coded
according to their classification, for use later in the pipeline (for
example, to be saved by a **SaveImages** module).
""" % globals(),
            ),
        )

        group.append(
            "image_name",
            ImageName(
                "Name the output image",
                "ClassifiedNuclei",
                doc=
                """Enter the name to be given to the classified object image.""",
            ),
        )

        group.can_delete = can_delete

        def number_of_bins():
            """Return the # of bins in this classification"""
            if group.bin_choice == BC_EVEN:
                value = group.bin_count.value
            else:
                value = len(group.custom_thresholds.value.split(",")) - 1
            if group.wants_low_bin:
                value += 1
            if group.wants_high_bin:
                value += 1
            return value

        group.number_of_bins = number_of_bins

        def measurement_name():
            """Get the measurement name to use inside the bin name

            Account for conflicts with previous measurements
            """
            measurement_name = group.measurement.value
            other_same = 0
            for other in self.single_measurements:
                if id(other) == id(group):
                    break
                if other.measurement.value == measurement_name:
                    other_same += 1
            if other_same > 0:
                measurement_name += str(other_same)
            return measurement_name

        def bin_feature_names():
            """Return the feature names for each bin"""
            if group.wants_custom_names:
                return [
                    name.strip() for name in group.bin_names.value.split(",")
                ]
            return [
                "_".join((measurement_name(), "Bin_%d" % (i + 1)))
                for i in range(number_of_bins())
            ]

        group.bin_feature_names = bin_feature_names

        def validate_group():
            bin_name_count = len(bin_feature_names())
            bin_count = number_of_bins()
            if bin_count < 1:
                bad_setting = (group.bin_count if group.bin_choice == BC_EVEN
                               else group.custom_thresholds)
                raise ValidationError(
                    "You must have at least one bin in order to take measurements. "
                    "Either add more bins or ask for bins for objects above or below threshold",
                    bad_setting,
                )
            if bin_name_count != number_of_bins():
                raise ValidationError(
                    "The number of bin names (%d) does not match the number of bins (%d)."
                    % (bin_name_count, bin_count),
                    group.bin_names,
                )
            for bin_feature_name in bin_feature_names():
                Alphanumeric.validate_alphanumeric_text(
                    bin_feature_name, group.bin_names, True)
            if group.bin_choice == BC_CUSTOM:
                try:
                    [
                        float(x.strip())
                        for x in group.custom_thresholds.value.split(",")
                    ]
                except ValueError:
                    raise ValidationError(
                        "Custom thresholds must be a comma-separated list "
                        'of numbers (example: "1.0, 2.3, 4.5")',
                        group.custom_thresholds,
                    )
            elif group.bin_choice == BC_EVEN:
                if group.low_threshold.value >= group.high_threshold.value:
                    raise ValidationError(
                        "Lower Threshold must be less than Upper Threshold",
                        group.low_threshold,
                    )

        group.validate_group = validate_group

        if can_delete:
            group.remove_settings_button = RemoveSettingButton(
                "", "Remove this classification", self.single_measurements,
                group)
        self.single_measurements.append(group)
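
A hedged note on how the per-group validate_group() callback defined above is typically wired up: the module's validate_module() hook walks every group, so a bad bin configuration is reported next to the offending setting in the GUI. Sketch only; the real module may do additional checks here.

    def validate_module(self, pipeline):
        # Delegate to each group's own validator; ValidationError carries the
        # setting that should be highlighted.
        for group in self.single_measurements:
            group.validate_group()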
Code example #13
    def create_settings(self):
        #
        # The ImageSubscriber "subscribes" to all ImageNameProviders in
        # prior modules. Modules before yours will put images into CellProfiler.
        # The ImageSubscriber gives your user a list of these images
        # which can then be used as inputs in your module.
        #
        self.input_image_name = ImageSubscriber(
            # The text to the left of the edit box
            "Input image name",
            # HTML help that gets displayed when the user presses the
            # help button to the right of the edit box
            doc="""This is the image that the module operates on. You can
            choose any image that is made available by a prior module.
            <br>
            <b>ImageTemplate</b> will do something to this image.
            """)
        #
        # The text.ImageName makes the image available to subsequent
        # modules.
        #
        self.output_image_name = ImageName(
            "Output image name",
            # The second parameter holds a suggested name for the image.
            "OutputImage",
            doc="""This is the image resulting from the operation.""")
        #
        # Here's a choice box - the user gets a drop-down list of what
        # can be done.
        #
        self.transform_choice = Choice(
            "Transform choice",
            # The choice takes a list of possibilities. The first one
            # is the default - the one the user will typically choose.
            [
                M_FOURIER, M_SIMONCELLI_P, M_SIMONCELLI_R, M_TEST_FOURIER,
                M_TEST_SIMONCELLI_P, M_TEST_SIMONCELLI_R, M_HAAR_S, M_HAAR_T,
                M_TEST_HAAR, M_CHEBYSHEV_T
            ],
            #
            # Here, in the documentation, we do a little trick so that
            # we use the actual text that's displayed in the documentation.
            #
            # %(GRADIENT_MAGNITUDE)s will get changed into "Gradient magnitude"
            # etc. Python will look in globals() for the "GRADIENT_" names
            # and paste them in where it sees %(GRADIENT_...)s
            #
            # The <ul> and <li> tags make a neat bullet-point list in the docs
            #
            doc='''There are several transforms available:
             <ul><li><i>Fourier Transform:</i> Blabla. </li>
             <li><i>Wavelet Transform:</i> Blabla. </li>
             <li><i>Chebyshev Transform:</i> Blabla. </li></ul>''' % globals())
        #
        # We use a float setting so that the user can give us a number
        # for the scale. The control will turn red if the user types in
        # an invalid scale.
        #
        self.scale = Integer(
            "Scale",
            # The default value is 3 - a short-range scale
            3,
            # We don't let the user type in really small values
            minval=1,
            # or large values
            maxval=100,
            doc="""This is a scaling factor that supplies the sigma for
            a gaussian that's used to smooth the image. The gradient is
            calculated on the smoothed image, so large scales will give
            you long-range gradients and small scales will give you
            short-range gradients""")

        self.M = Integer(
            "Order",
            # The default value is 0 - use the order matching the image dimensions
            0,
            # We don't let the user type in really small values
            minval=0,
            # or large values
            maxval=50,
            doc=
            """This is the order of the Chebyshev Transform. A value of 0 will use the order matching the image dimensions."""
        )
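
The excerpt only defines settings; a tutorial module like this pairs them with a run() method that reads the subscribed image and publishes the named output. A minimal sketch, assuming Image is imported from cellprofiler_core.image and leaving the actual transform as a placeholder.

    def run(self, workspace):
        image_set = workspace.image_set
        # The subscriber's value names the input image provided by an earlier module.
        input_image = image_set.get_image(self.input_image_name.value,
                                          must_be_grayscale=True)
        pixel_data = input_image.pixel_data
        # Placeholder: apply the transform selected in self.transform_choice here.
        output_pixels = pixel_data.copy()
        output_image = Image(output_pixels, parent_image=input_image)
        # Publish the result under the name entered in the ImageName setting.
        image_set.add(self.output_image_name.value, output_image)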
Code example #14
    def create_settings(self):
        self.blank_image = Binary(
            "Display outlines on a blank image?",
            False,
            doc="""\
Select "*{YES}*" to produce an image of the outlines on a black background.

Select "*{NO}*" to overlay the outlines on an image you choose.
""".format(
                **{"YES": "Yes", "NO": "No"}
            ),
        )

        self.image_name = ImageSubscriber(
            "Select image on which to display outlines",
            "None",
            doc="""\
*(Used only when a blank image has not been selected)*

Choose the image to serve as the background for the outlines. You can
choose from images that were loaded or created by modules previous to
this one.
""",
        )

        self.line_mode = Choice(
            "How to outline",
            ["Inner", "Outer", "Thick"],
            value="Inner",
            doc="""\
Specify how to mark the boundaries around an object:

-  *Inner:* outline the pixels just inside of objects, leaving
   background pixels untouched.
-  *Outer:* outline pixels in the background around object boundaries.
   When two objects touch, their boundary is also marked.
-  *Thick:* any pixel not completely surrounded by pixels of the same
   label is marked as a boundary. This results in boundaries that are 2
   pixels thick.
""",
        )

        self.output_image_name = ImageName(
            "Name the output image",
            "OrigOverlay",
            doc="""\
Enter the name of the output image with the outlines overlaid. This
image can be selected in later modules (for instance, **SaveImages**).
""",
        )

        self.wants_color = Choice(
            "Outline display mode",
            [WANTS_COLOR, WANTS_GRAYSCALE],
            doc="""\
Specify how to display the outline contours around your objects. Color
outlines produce a clearer display for images where the cell borders
have a high intensity, but take up more space in memory. Grayscale
outlines are displayed with either the highest possible intensity or the
same intensity as the brightest pixel in the image.
""",
        )

        self.spacer = Divider(line=False)

        self.max_type = Choice(
            "Select method to determine brightness of outlines",
            [MAX_IMAGE, MAX_POSSIBLE],
            doc="""\
*(Used only when outline display mode is grayscale)*

The following options are possible for setting the intensity
(brightness) of the outlines:

-  *{MAX_IMAGE}:* Set the brightness to the same as the brightest
   point in the image.
-  *{MAX_POSSIBLE}:* Set to the maximum possible value for this image
   format.

If your image is quite dim, then putting bright white lines onto it may
not be useful. It may be preferable to make the outlines equal to the
maximal brightness already occurring in the image.
""".format(
                **{"MAX_IMAGE": MAX_IMAGE, "MAX_POSSIBLE": MAX_POSSIBLE}
            ),
        )

        self.outlines = []

        self.add_outline(can_remove=False)

        self.add_outline_button = DoSomething(
            "", "Add another outline", self.add_outline
        )
Code example #15
    def create_settings(self):
        # the list of per image settings (name & scaling factor)
        self.images = []
        # create the first two images (the default number)
        self.add_image(False)
        self.add_image(False)

        # other settings
        self.operation = Choice(
            "Operation",
            [
                O_ADD,
                O_SUBTRACT,
                O_DIFFERENCE,
                O_MULTIPLY,
                O_DIVIDE,
                O_AVERAGE,
                O_MINIMUM,
                O_MAXIMUM,
                O_INVERT,
                O_LOG_TRANSFORM,
                O_LOG_TRANSFORM_LEGACY,
                O_AND,
                O_OR,
                O_NOT,
                O_EQUALS,
                O_NONE,
            ],
            doc="""\
Select the operation to perform. Note that if more than two images are
chosen, then operations will be performed sequentially from first to
last, e.g., for “Divide”, (Image1 / Image2) / Image3

-  *%(O_ADD)s:* Adds the first image to the second, and so on.
-  *%(O_SUBTRACT)s:* Subtracts the second image from the first.
-  *%(O_DIFFERENCE)s:* The absolute value of the difference between the
   first and second images.
-  *%(O_MULTIPLY)s:* Multiplies the first image by the second.
-  *%(O_DIVIDE)s:* Divides the first image by the second.
-  *%(O_AVERAGE)s:* Calculates the mean intensity of the images loaded
   in the module. This is equivalent to the Add option divided by the
   number of images loaded by this module. If you would like to average
   all of the images in an entire pipeline, i.e., across cycles, you
   should instead use the **CorrectIlluminationCalculate** module and
   choose the *All* (vs. *Each*) option.
-  *%(O_MINIMUM)s:* Returns the element-wise minimum value at each
   pixel location.
-  *%(O_MAXIMUM)s:* Returns the element-wise maximum value at each
   pixel location.
-  *%(O_INVERT)s:* Subtracts the image intensities from 1. This makes
   the darkest color the brightest and vice-versa. Note that if a
   mask has been applied to the image, the mask will also be inverted.
-  *%(O_LOG_TRANSFORM)s:* Log transforms each pixel’s intensity. The
   actual function is log\ :sub:`2`\ (image + 1), transforming values
   from 0 to 1 into values from 0 to 1.
-  *%(O_LOG_TRANSFORM_LEGACY)s:* Log\ :sub:`2` transform for backwards
   compatibility.
-  *%(O_NONE)s:* This option is useful if you simply want to select some
   of the later options in the module, such as adding, multiplying, or
   exponentiating your image by a constant.

The following are operations that produce binary images. In a binary
image, the foreground has a truth value of “true” (ones) and the background has
a truth value of “false” (zeros). The operations, *%(O_OR)s, %(O_AND)s and
%(O_NOT)s* will convert the input images to binary by changing all zero
values to background (false) and all other values to foreground (true).

-  *%(O_AND)s:* a pixel in the output image is in the foreground only
   if all corresponding pixels in the input images are also in the
   foreground.
-  *%(O_OR)s:* a pixel in the output image is in the foreground if a
   corresponding pixel in any of the input images is also in the
   foreground.
-  *%(O_NOT)s:* the foreground of the input image becomes the
   background of the output image and vice-versa.
-  *%(O_EQUALS)s:* a pixel in the output image is in the foreground if
   the corresponding pixels in the input images have the same value.

Note that *%(O_INVERT)s*, *%(O_LOG_TRANSFORM)s*,
*%(O_LOG_TRANSFORM_LEGACY)s* and *%(O_NONE)s* operate on only a
single image.
""" % globals(),
        )
        self.divider_top = Divider(line=False)

        self.exponent = Float(
            "Raise the power of the result by",
            1,
            doc="""\
Enter an exponent to raise the result to *after* the chosen operation.""",
        )

        self.after_factor = Float(
            "Multiply the result by",
            1,
            doc="""\
Enter a factor to multiply the result by *after* the chosen operation.""",
        )

        self.addend = Float(
            "Add to result",
            0,
            doc="""\
Enter a number to add to the result *after* the chosen operation.""",
        )

        self.truncate_low = Binary(
            "Set values less than 0 equal to 0?",
            True,
            doc="""\
Values outside the range 0 to 1 might not be handled well by other
modules. Select *Yes* to set negative values to 0.
""" % globals(),
        )

        self.truncate_high = Binary(
            "Set values greater than 1 equal to 1?",
            True,
            doc="""\
Values outside the range 0 to 1 might not be handled well by other
modules. Select *Yes* to set values greater than 1 to a maximum
value of 1.
""" % globals(),
        )

        self.replace_nan = Binary(
            "Replace invalid values with 0?",
            True,
            doc="""\
Certain operations are mathematically invalid (divide by zero,
raise a negative number to the power of a fraction, etc.).
This setting will set pixels with invalid values to zero.
Disabling this setting will represent these pixels as "nan"
("Not A Number"). "nan" pixels cannot be displayed properly and
may cause errors in other modules.
""" % globals(),
        )

        self.ignore_mask = Binary(
            "Ignore the image masks?",
            False,
            doc="""\
Select *Yes* to set all previously masked pixels to zero and
operate on the masked images as if no mask had been applied. Otherwise,
the smallest image mask is applied after image math has been completed.
""" % globals(),
        )

        self.output_image_name = ImageName(
            "Name the output image",
            "ImageAfterMath",
            doc="""\
Enter a name for the resulting image.""",
        )

        self.add_button = DoSomething("", "Add another image", self.add_image)

        self.divider_bottom = Divider(line=False)
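
A minimal NumPy sketch of the arithmetic described above for the "Add" case, applying the operation sequentially and then the exponent, multiplier, addend, and truncation settings in that order. The function and argument names here are illustrative assumptions, not the module's own code.

    import numpy as np

    def image_math_add(images, exponent=1.0, factor=1.0, addend=0.0,
                       truncate_low=True, truncate_high=True):
        """Sequentially add images, then apply the post-operation settings."""
        result = images[0].astype(float)
        for img in images[1:]:            # (Image1 + Image2) + Image3, ...
            result = result + img
        result = result ** exponent       # "Raise the power of the result by"
        result = result * factor          # "Multiply the result by"
        result = result + addend          # "Add to result"
        if truncate_low:
            result[result < 0] = 0        # "Set values less than 0 equal to 0?"
        if truncate_high:
            result[result > 1] = 1        # "Set values greater than 1 equal to 1?"
        return result

    # Toy usage with two small images scaled 0-1.
    a = np.array([[0.2, 0.4], [0.6, 0.8]])
    b = np.array([[0.5, 0.5], [0.5, 0.5]])
    print(image_math_add([a, b]))
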
Code example #16
    def create_settings(self):
        self.input_image = ImageSubscriber(
            "Select an input image",
            "None",
            doc=
            """Select the image to be tiled. Additional images within the cycle can be
added later by choosing the "*%(T_WITHIN_CYCLES)s*" option below.
""" % globals(),
        )

        self.output_image = ImageName(
            "Name the output image",
            "TiledImage",
            doc="""Enter a name for the final tiled image.""",
        )

        self.additional_images = []

        self.add_button = DoSomething(
            "",
            "Add another image",
            self.add_image,
            doc="""Add images from other channels to perform similar tiling""",
        )

        self.tile_method = Choice(
            "Tile assembly method",
            T_ALL,
            doc="""\
This setting controls the method by which the final tiled image is
assembled:

-  *%(T_WITHIN_CYCLES)s:* If you have loaded more than one image for
   each cycle using modules upstream in the pipeline, the images can be
   tiled. For example, you may tile three different channels (OrigRed,
   OrigBlue, and OrigGreen), and a new tiled image will be created for
   every image cycle.
-  *%(T_ACROSS_CYCLES)s:* If you want to tile images from multiple
   cycles together, select this option. For example, you may tile all
   the images of the same type (e.g., OrigBlue) across all fields of
   view in your experiment, which will result in one final tiled image
   when processing is complete.
""" % globals(),
        )

        self.rows = Integer(
            "Final number of rows",
            8,
            doc="""\
Specify the number of rows you would like to have in the tiled image.
For example, if you want to show your images in a 96-well format, enter
8.

*Special cases:* Let *M* be the total number of slots for images (i.e.,
number of rows x number of columns) and *N* be the number of actual
images.

-  If *M* > *N*, blanks will be used for the empty slots.
-  If *M* < *N*, an error will occur since there are not enough
   image slots. Check “Automatically calculate number of rows?” to avoid
   this error.
""",
        )

        self.columns = Integer(
            "Final number of columns",
            12,
            doc="""\
Specify the number of columns you would like to have in the tiled image. For
example, if you want to show your images in a 96-well format, enter 12.

*Special cases:* Let *M* be the total number of slots for images (i.e.,
number of rows x number of columns) and *N* be the number of actual
images.

-  If *M* > *N*, blanks will be used for the empty slots.
-  If *M* < *N*, an error will occur since there are not enough
   image slots. Check “Automatically calculate number of columns?” to
   avoid this error.
""",
        )

        self.place_first = Choice(
            "Image corner to begin tiling",
            P_ALL,
            doc=
            """Where do you want the first image to be placed? Begin in the upper
left-hand corner for a typical multi-well plate format where the first image is A01.
""",
        )

        self.tile_style = Choice(
            "Direction to begin tiling",
            S_ALL,
            doc=
            """This setting specifies the order that the images are to be arranged. For example, if
your images are named A01, A02, etc, enter "*%(S_ROW)s*".
""" % globals(),
        )

        self.meander = Binary(
            "Use meander mode?",
            False,
            doc="""\
Select "*Yes*" to tile adjacent images in one direction, then the next
row/column is tiled in the opposite direction. Some microscopes capture
images in this fashion. The default mode is “comb”, or “typewriter”
mode; in this mode, when one row is completely tiled in one direction,
the next row starts near where the first row started and tiles again in
the same direction.
""" % globals(),
        )

        self.wants_automatic_rows = Binary(
            "Automatically calculate number of rows?",
            False,
            doc="""\
**Tile** can automatically calculate the number of rows in the grid
based on the number of image cycles that will be processed. Select
"*Yes*" to create a grid that has the number of columns that you
entered and enough rows to display all of your images. Select "*No*"
to specify the number of rows.

If you check both automatic rows and automatic columns, **Tile** will
create a grid that has roughly the same number of rows and columns.
""" % globals(),
        )

        self.wants_automatic_columns = Binary(
            "Automatically calculate number of columns?",
            False,
            doc="""\
**Tile** can automatically calculate the number of columns in the grid
from the number of image cycles that will be processed. Select "*Yes*"
to create a grid that has the number of rows that you entered and enough
columns to display all of your images. Select "*No*" to specify the
number of columns.

If you check both automatic rows and automatic columns, **Tile** will
create a grid that has roughly the same number of rows and columns.
""" % globals(),
        )
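
The row and column settings above come down to simple arithmetic: with R rows and C columns there are M = R x C slots for N images, and the automatic options pick enough rows or columns to hold all N. A rough sketch of that logic, assuming the fully automatic case targets a roughly square grid; this is not the module's actual code.

    import math

    def grid_shape(n_images, rows=None, columns=None):
        """Return (rows, columns) with enough slots for n_images.

        A value of None means "calculate automatically"."""
        if rows is None and columns is None:
            columns = math.ceil(math.sqrt(n_images))   # roughly square grid
            rows = math.ceil(n_images / columns)
        elif rows is None:
            rows = math.ceil(n_images / columns)
        elif columns is None:
            columns = math.ceil(n_images / rows)
        elif rows * columns < n_images:
            raise ValueError("Not enough image slots: M < N")
        return rows, columns

    print(grid_shape(96, rows=8, columns=12))  # (8, 12), a 96-well layout
    print(grid_shape(10, columns=4))           # automatic rows -> (3, 4)
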
Code example #17
    def create_settings(self):
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="Choose the image you want to flip or rotate.",
        )

        self.output_name = ImageName(
            "Name the output image",
            "FlippedOrigBlue",
            doc="Provide a name for the transformed image.",
        )

        self.flip_choice = Choice(
            "Select method to flip image",
            FLIP_ALL,
            doc="""\
Select how the image is to be flipped.""",
        )

        self.rotate_choice = Choice(
            "Select method to rotate image",
            ROTATE_ALL,
            doc="""\
-  *%(ROTATE_NONE)s:* Leave the image unrotated. This should be used if
   you want to flip the image only.
-  *%(ROTATE_ANGLE)s:* Provide the numerical angle by which the image
   should be rotated.
-  *%(ROTATE_COORDINATES)s:* Provide the X,Y pixel locations of two
   points in the image that should be aligned horizontally or
   vertically.
-  *%(ROTATE_MOUSE)s:* CellProfiler will pause so you can select the
   rotation interactively. When prompted during the analysis run, grab
   the image by clicking the left mouse button, rotate the image by
   dragging with the mouse, then release the mouse button. Press the
   *Done* button on the image after rotating the image appropriately.
""" % globals(),
        )

        self.wants_crop = Binary(
            "Crop away the rotated edges?",
            True,
            doc="""\
*(Used only when rotating images)*

When an image is rotated, there will be black space at the
corners/edges; select *Yes* to crop away the incomplete rows and
columns of the image, or select *No* to leave it as-is.

This cropping will produce an image that is not exactly the same size as
the original, which may affect downstream modules.
""" % globals(),
        )

        self.how_often = Choice(
            "Calculate rotation",
            IO_ALL,
            doc="""\
*(Used only when using “%(ROTATE_MOUSE)s” to rotate images)*

Select the cycle(s) at which the calculation is requested and
calculated.

-  *%(IO_INDIVIDUALLY)s:* Determine the amount of rotation for each image individually, e.g., for each cycle.
-  *%(IO_ONCE)s:* Define the rotation only once (on the first image), then apply it to all images.
""" % globals(),
        )

        self.first_pixel = Coordinates(
            "Enter coordinates of the top or left pixel",
            (0, 0),
            doc="""\
*(Used only when using {ROTATE_COORDINATES} to rotate images)*

After rotation, if the specified points are aligned horizontally, this point on the image will be positioned to the
left of the other point. If the specified points are aligned vertically, this point of the image will be positioned
above the other point.
""".format(**{"ROTATE_COORDINATES": ROTATE_COORDINATES}),
        )

        self.second_pixel = Coordinates(
            "Enter the coordinates of the bottom or right pixel",
            (0, 100),
            doc="""\
*(Used only when using {ROTATE_COORDINATES} to rotate images)*

After rotation, if the specified points are aligned horizontally, this point on the image will be positioned to the
right of the other point. If the specified points are aligned vertically, this point of the image will be positioned
below the other point.
""".format(**{"ROTATE_COORDINATES": ROTATE_COORDINATES}),
        )

        self.horiz_or_vert = Choice(
            "Select how the specified points should be aligned",
            C_ALL,
            doc="""\
*(Used only when using “%(ROTATE_COORDINATES)s” to rotate images)*

Specify whether you would like the coordinate points that you entered to
be horizontally or vertically aligned after the rotation is complete.""" %
            globals(),
        )

        self.angle = Float(
            "Enter angle of rotation",
            0,
            doc="""\
*(Used only when using “%(ROTATE_ANGLE)s” to rotate images)*

Enter the angle by which you would like to rotate the image. This setting
is in degrees, with positive angles corresponding to counterclockwise
rotation and negative angles to clockwise rotation.""" % globals(),
        )
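
For the coordinate-based rotation described above, the needed angle follows from the angle that the segment between the two points makes with the horizontal. A small sketch of that calculation; the sign of the final rotation depends on the image's y-axis direction and the rotation routine, so treat it as illustrative only.

    import math

    def angle_between_points(first, second):
        """Angle of the segment from first to second, in degrees from horizontal."""
        dx = second[0] - first[0]
        dy = second[1] - first[1]
        return math.degrees(math.atan2(dy, dx))

    # To align the two points horizontally you would rotate by roughly the
    # negative of this angle; the exact sign convention is left to the
    # rotation routine being used.
    print(angle_between_points((0, 0), (100, 10)))  # ~5.71 degrees
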
Code example #18
    def create_settings(self):
        self.object_name = LabelSubscriber(
            "Select objects to measure",
            "None",
            doc="""\
Select the objects whose neighbors you want to measure.""",
        )

        self.neighbors_name = LabelSubscriber(
            "Select neighboring objects to measure",
            "None",
            doc="""\
This is the name of the objects that are potential
neighbors of the above objects. You can find the neighbors
within the same set of objects by selecting the same objects
as above.""",
        )

        self.distance_method = Choice(
            "Method to determine neighbors",
            D_ALL,
            D_EXPAND,
            doc="""\
There are several methods by which to determine whether objects are
neighbors:

-  *%(D_ADJACENT)s:* In this mode, two objects must have adjacent
   boundary pixels to be neighbors.
-  *%(D_EXPAND)s:* The objects are expanded until all pixels on the
   object boundaries are touching another. Two objects are neighbors if
   any of their boundary pixels are adjacent after expansion.
-  *%(D_WITHIN)s:* Each object is expanded by the number of pixels you
   specify. Two objects are neighbors if they have adjacent pixels after
   expansion.

For *%(D_ADJACENT)s* and *%(D_EXPAND)s*, the
*%(M_PERCENT_TOUCHING)s* measurement is the percentage of pixels on
the boundary of an object that touch adjacent objects. For
*%(D_WITHIN)s*, two objects are touching if any of their boundary
pixels are adjacent after expansion and *%(M_PERCENT_TOUCHING)s*
measures the percentage of boundary pixels of an *expanded* object that
touch adjacent objects.
""" % globals(),
        )

        self.distance = Integer(
            "Neighbor distance",
            5,
            1,
            doc="""\
*(Used only when “%(D_WITHIN)s” is selected)*

The Neighbor distance is the number of pixels that each object is
expanded for the neighbor calculation. Expanded objects that touch are
considered neighbors.
""" % globals(),
        )

        self.wants_count_image = Binary(
            "Retain the image of objects colored by numbers of neighbors?",
            False,
            doc="""\
An output image showing the input objects colored by numbers of
neighbors may be retained. A colormap of your choice shows how many
neighbors each object has. The background is set to -1. Objects are
colored with an increasing color value corresponding to the number of
neighbors, such that objects with no neighbors are given a color
corresponding to 0. Use the **SaveImages** module to save this image to
a file.""",
        )

        self.count_image_name = ImageName(
            "Name the output image",
            "ObjectNeighborCount",
            doc="""\
*(Used only if the image of objects colored by numbers of neighbors is
to be retained for later use in the pipeline)*

Specify a name that will allow the image of objects colored by numbers
of neighbors to be selected later in the pipeline.""",
        )

        self.count_colormap = Colormap(
            "Select colormap",
            value="Blues",
            doc="""\
*(Used only if the image of objects colored by numbers of neighbors is
to be retained for later use in the pipeline)*

Select the colormap to use to color the neighbor number image. All
available colormaps can be seen `here`_.

.. _here: http://matplotlib.org/examples/color/colormaps_reference.html""",
        )

        self.wants_percent_touching_image = Binary(
            "Retain the image of objects colored by percent of touching pixels?",
            False,
            doc="""\
Select *Yes* to keep an image of the input objects colored by the
percentage of the boundary touching their neighbors. A colormap of your
choice is used to show the touching percentage of each object. Use the
**SaveImages** module to save this image to a file.
""" % globals(),
        )

        self.touching_image_name = ImageName(
            "Name the output image",
            "PercentTouching",
            doc="""\
*(Used only if the image of objects colored by percent touching is to be
retained for later use in the pipeline)*

Specify a name that will allow the image of objects colored by percent
of touching pixels to be selected later in the pipeline.""",
        )

        self.touching_colormap = Colormap(
            "Select colormap",
            value="Oranges",
            doc="""\
*(Used only if the image of objects colored by percent touching is to be
retained for later use in the pipeline)*

Select the colormap to use to color the percent touching image. All
available colormaps can be seen `here`_.

.. _here: http://matplotlib.org/examples/color/colormaps_reference.html""",
        )

        self.wants_excluded_objects = Binary(
            "Consider objects discarded for touching image border?",
            True,
            doc="""\
When set to *{YES}*, objects which were previously discarded for touching
the image borders will be considered as potential object neighbors in this
analysis. You may want to disable this if using object sets which were
further filtered, since those filters won't have been applied to the
previously discarded objects.""".format(**{"YES": "Yes"}),
        )
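
The "Within a specified distance" method above expands each object by the chosen number of pixels and calls two objects neighbors if the expanded masks touch. A brute-force sketch of that idea on a label matrix using SciPy dilation; it illustrates the concept only and is much slower than the module's implementation.

    import numpy as np
    from scipy.ndimage import binary_dilation, generate_binary_structure

    def neighbors_within(labels, distance):
        """Map each label to the labels it touches after expanding by `distance` pixels."""
        struct = generate_binary_structure(2, 2)   # 8-connected expansion
        result = {}
        for label in np.unique(labels):
            if label == 0:
                continue                           # 0 is background
            expanded = binary_dilation(labels == label, struct, iterations=distance)
            touched = np.unique(labels[expanded])
            result[int(label)] = sorted(int(t) for t in touched if t not in (0, label))
        return result

    toy = np.zeros((8, 8), dtype=int)
    toy[1:3, 1:3] = 1
    toy[5:7, 5:7] = 2
    print(neighbors_within(toy, distance=3))   # {1: [2], 2: [1]} once expansion bridges the gap
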
Code example #19
    def create_settings(self):
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="""Select the multichannel image you want to convert to grayscale.""",
        )

        self.combine_or_split = Choice(
            "Conversion method",
            [COMBINE, SPLIT],
            doc="""\
How do you want to convert the color image?

-  *%(SPLIT)s:* Splits the channels of a color
   image (e.g., red, green, blue) into separate grayscale images.
-  *%(COMBINE)s:* Converts a color image to a grayscale image by
   combining channels together (e.g., red, green, blue)."""
            % globals(),
        )

        self.rgb_or_channels = Choice(
            "Image type",
            [CH_RGB, CH_HSV, CH_CHANNELS],
            doc="""\
This setting provides three options to choose from:

-  *%(CH_RGB)s:* The RGB (red, green, blue) color space is the typical
   model in which color images are stored. Choosing this option will
   split the image into red, green, and blue component images.
-  *%(CH_HSV)s:* The HSV (hue, saturation, value) color space is based
   on color characteristics such as tint, shade, and tone.
   Choosing this option will split the image into the hue,
   saturation, and value component images.
-  *%(CH_CHANNELS)s:* Many images contain color channels other than RGB
   or HSV. For instance, GIF and PNG formats can have an alpha
   channel that encodes transparency. TIF formats can have an arbitrary
   number of channels which represent pixel measurements made by
   different detectors, filters or lighting conditions. This setting
   allows you to handle a more complex model for images that
   have more than three channels."""
            % globals(),
        )

        # The following settings are used for the combine option
        self.grayscale_name = ImageName(
            "Name the output image",
            "OrigGray",
            doc="""\
*(Used only when combining channels)*

Enter a name for the resulting grayscale image.""",
        )

        self.red_contribution = Float(
            "Relative weight of the red channel",
            1,
            0,
            doc="""\
*(Used only when combining channels)*

Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.""",
        )

        self.green_contribution = Float(
            "Relative weight of the green channel",
            1,
            0,
            doc="""\
*(Used only when combining channels)*

Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.""",
        )

        self.blue_contribution = Float(
            "Relative weight of the blue channel",
            1,
            0,
            doc="""\
*(Used only when combining channels)*

Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.""",
        )

        # The following settings are used for the split RGB option
        self.use_red = Binary(
            "Convert red to gray?",
            True,
            doc="""\
*(Used only when splitting RGB images)*

Select *"Yes"* to extract the red channel to grayscale. Otherwise, the
red channel will be ignored.
"""
            % globals(),
        )

        self.red_name = ImageName(
            "Name the output image",
            "OrigRed",
            doc="""\
*(Used only when splitting RGB images)*

Enter a name for the resulting grayscale image coming from the red channel.""",
        )

        self.use_green = Binary(
            "Convert green to gray?",
            True,
            doc="""\
*(Used only when splitting RGB images)*

Select *"Yes"* to extract the green channel to grayscale. Otherwise, the
green channel will be ignored.
"""
            % globals(),
        )

        self.green_name = ImageName(
            "Name the output image",
            "OrigGreen",
            doc="""\
*(Used only when splitting RGB images)*

Enter a name for the resulting grayscale image coming from the green channel.""",
        )

        self.use_blue = Binary(
            "Convert blue to gray?",
            True,
            doc="""\
*(Used only when splitting RGB images)*

Select *"Yes"* to extract the blue channel to grayscale. Otherwise, the
blue channel will be ignored.
"""
            % globals(),
        )

        self.blue_name = ImageName(
            "Name the output image",
            "OrigBlue",
            doc="""\
*(Used only when splitting RGB images)*

Enter a name for the resulting grayscale image coming from the blue channel.""",
        )

        # The following settings are used for the split HSV option
        self.use_hue = Binary(
            "Convert hue to gray?",
            True,
            doc="""\
*(Used only when splitting HSV images)*

Select *"Yes"* to extract the hue to grayscale. Otherwise, the hue
will be ignored.
"""
            % globals(),
        )

        self.hue_name = ImageName(
            "Name the output image",
            "OrigHue",
            doc="""\
*(Used only when splitting HSV images)*

Enter a name for the resulting grayscale image coming from the hue.""",
        )

        self.use_saturation = Binary(
            "Convert saturation to gray?",
            True,
            doc="""\
*(Used only when splitting HSV images)*

Select *"Yes"* to extract the saturation to grayscale. Otherwise, the
saturation will be ignored.
"""
            % globals(),
        )

        self.saturation_name = ImageName(
            "Name the output image",
            "OrigSaturation",
            doc="""\
*(Used only when splitting HSV images)*

Enter a name for the resulting grayscale image coming from the saturation.""",
        )

        self.use_value = Binary(
            "Convert value to gray?",
            True,
            doc="""\
*(Used only when splitting HSV images)*

Select *"Yes"* to extract the value to grayscale. Otherwise, the
value will be ignored.
"""
            % globals(),
        )

        self.value_name = ImageName(
            "Name the output image",
            "OrigValue",
            doc="""\
*(Used only when splitting HSV images)*

Enter a name for the resulting grayscale image coming from the value.""",
        )

        # The alternative model:
        self.channels = []
        self.add_channel(False)
        self.channel_button = DoSomething("", "Add another channel", self.add_channel)

        self.channel_count = HiddenCount(self.channels, "Channel count")
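
Combining channels with relative weights, as the settings above describe, is a weighted per-pixel sum. A small sketch, assuming the weights are normalized by their sum so that equal weights give an ordinary average; that normalization is an assumption for the example, not a statement about the module's exact math.

    import numpy as np

    def combine_channels(rgb, red_weight=1.0, green_weight=1.0, blue_weight=1.0):
        """Weighted combination of an RGB image into a grayscale image."""
        weights = np.array([red_weight, green_weight, blue_weight], dtype=float)
        weights = weights / weights.sum()              # normalize the relative weights
        return (rgb[..., 0] * weights[0]
                + rgb[..., 1] * weights[1]
                + rgb[..., 2] * weights[2])

    rgb = np.random.rand(4, 4, 3)
    gray = combine_channels(rgb, red_weight=2.0)       # red contributes twice as much
    print(gray.shape)                                  # (4, 4)
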
Code example #20
    def create_settings(self):
        # Input settings
        self.input_color_choice = Choice(
            "Input image type",
            CC_ALL,
            doc=
            "Specify whether you are combining several grayscale images or loading a single color image.",
        )

        self.wants_red_input = Binary(
            "Use a red image?",
            True,
            doc="""\
*(Used only if input image type is "{CC_GRAYSCALE}")*

Select "*Yes*" to specify an image to use for the red channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.red_input_image = ImageSubscriber(
            "Select the red image",
            "None",
            doc="""\
*(Used only if input image type is "{CC_GRAYSCALE}" and a red image is used)*

Provide an image for the red channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.wants_green_input = Binary(
            "Use a green image?",
            True,
            doc="""\
*(Used only if input image type is "{CC_GRAYSCALE}")*

Select "*Yes*" to specify an image to use for the green channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.green_input_image = ImageSubscriber(
            "Select the green image",
            "None",
            doc="""\
*(Used only if input image type is "{CC_GRAYSCALE}" and a green image is used)*

Provide an image for the green channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.wants_blue_input = Binary(
            "Use a blue image?",
            True,
            doc="""\
*(Used only if input image type is "{CC_GRAYSCALE}")*

Select "*Yes*" to specify an image to use for the blue channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.blue_input_image = ImageSubscriber(
            "Select the blue image",
            "None",
            doc="""\
*(Used only if input image type is "{CC_GRAYSCALE}" and a blue image is used)*

Provide an image for the blue channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.color_input_image = ImageSubscriber(
            "Select the color image",
            "None",
            doc="""
*(Used only if input image type is "{CC_COLOR}")*

Select the color image to use.
""".format(**{"CC_COLOR": CC_COLOR}),
        )

        # Output settings
        self.output_color_choice = Choice(
            "Output image type",
            CC_ALL,
            doc=
            "Specify whether you want to produce several grayscale images or one color image.",
        )

        self.wants_red_output = Binary(
            'Select "*Yes*" to produce a red image.',
            True,
            doc="""\
*(Used only if output image type is "{CC_GRAYSCALE}")*

Select "*Yes*" to produce a grayscale image corresponding to the inverted red channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.red_output_image = ImageName(
            "Name the red image",
            "InvertedRed",
            doc="""\
*(Used only if output image type is "{CC_GRAYSCALE}" and a red image is output)*

Provide a name for the inverted red channel image.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.wants_green_output = Binary(
            'Select "*Yes*" to produce a green image.',
            True,
            doc="""\
*(Used only if output image type is "{CC_GRAYSCALE}")*

Select "*Yes*" to produce a grayscale image corresponding to the inverted green channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.green_output_image = ImageName(
            "Name the green image",
            "InvertedGreen",
            doc="""\
*(Used only if output image type is "{CC_GRAYSCALE}" and a green image is output)*

Provide a name for the inverted green channel image.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.wants_blue_output = Binary(
            'Select "*Yes*" to produce a blue image.',
            True,
            doc="""\
*(Used only if output image type is "{CC_GRAYSCALE}")*

Select "*Yes*" to produce a grayscale image corresponding to the inverted blue channel.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.blue_output_image = ImageName(
            "Name the blue image",
            "InvertedBlue",
            doc="""\
*(Used only if output image type is "{CC_GRAYSCALE}" and a blue image is output)*

Provide a name for the inverted blue channel image.
""".format(**{"CC_GRAYSCALE": CC_GRAYSCALE}),
        )

        self.color_output_image = ImageName(
            "Name the inverted color image",
            "InvertedColor",
            doc="""\
*(Used only when producing a color output image)*

Enter a name for the inverted color image.
""",
        )
Code example #21
    def create_settings(self):
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="Select the images to be made into a projection.",
        )

        self.projection_type = Choice(
            "Type of projection",
            P_ALL,
            doc="""\
The final projection image can be created by the following methods:

-  *%(P_AVERAGE)s:* Use the average pixel intensity at each pixel
   position.
-  *%(P_MAXIMUM)s:* Use the maximum pixel value at each pixel position.
-  *%(P_MINIMUM)s:* Use the minimum pixel value at each pixel position.
-  *%(P_SUM)s:* Add the pixel values at each pixel position.
-  *%(P_VARIANCE)s:* Compute the variance at each pixel position.
   The variance method is described in Selinummi et al. (2009). The
   method is designed to operate on a Z-stack of brightfield images
   taken at different focus planes. Background pixels will have
   relatively uniform illumination whereas cytoplasm pixels will have
   higher variance across the Z-stack.
-  *%(P_POWER)s:* Compute the power at a given frequency at each pixel
   position.
   The power method is experimental. The method computes the power at a
   given frequency through the Z-stack. It might be used with a phase
   contrast image where the signal at a given pixel will vary
   sinusoidally with depth. The frequency is measured in Z-stack steps
   and pixels that vary with the given frequency will have a higher
   score than other pixels with similar variance, but different
   frequencies.
-  *%(P_BRIGHTFIELD)s:* Perform the brightfield projection at each
   pixel position.
   Artifacts such as dust appear as black spots that are most strongly
   resolved at their focal plane with gradually increasing signals
   below. The brightfield method scores these as zero since the dark
   appears in the early Z-stacks. These pixels have a high score for the
   variance method but have a reduced score when using the brightfield
   method.
-  *%(P_MASK)s:* Compute a binary image of the pixels that are masked
   in any of the input images.
   The mask method operates on any masks that might have been applied to
   the images in a group. The output is a binary image where the “1”
   pixels are those that are not masked in all of the images and the “0”
   pixels are those that are masked in one or more of the images.
   You can use the output of the mask method to mask or crop all of the
   images in a group similarly. Use the mask method to combine all of
   the masks in a group, save the image and then use **Crop**,
   **MaskImage** or **MaskObjects** in another pipeline to mask all
   images or objects in the group similarly.

References
^^^^^^^^^^

-  Selinummi J, Ruusuvuori P, Podolsky I, Ozinsky A, Gold E, et al.
   (2009) “Bright field microscopy as an alternative to whole cell
   fluorescence in automated analysis of macrophage images”, *PLoS ONE*
   4(10): e7497 `(link)`_.

.. _(link): https://doi.org/10.1371/journal.pone.0007497
""" % globals(),
        )

        self.projection_image_name = ImageName(
            "Name the output image",
            "ProjectionBlue",
            doc="Enter the name for the projected image.",
            provided_attributes={
                "aggregate_image": True,
                "available_on_last": True,
            },
        )
        self.frequency = Float(
            "Frequency",
            6.0,
            minval=1.0,
            doc="""\
*(Used only if "%(P_POWER)s" is selected as the projection method)*

This setting controls the frequency at which the power is measured. A
frequency of 2 will respond most strongly to pixels that alternate
between dark and light in successive z-stack slices. A frequency of N
will respond most strongly to pixels whose brightness cycles every N
slices.""" % globals(),
        )
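
The average, maximum, minimum, sum, and variance projections described above are per-pixel reductions over the image stack; the power, brightfield, and mask methods are more involved and are not sketched here. A minimal NumPy illustration, with the stack layout and method names assumed for the example.

    import numpy as np

    def project(stack, method):
        """Per-pixel reduction of a (n_images, height, width) stack."""
        reducers = {
            "Average": np.mean,
            "Maximum": np.max,
            "Minimum": np.min,
            "Sum": np.sum,
            "Variance": np.var,   # high for textured cytoplasm, low for uniform background
        }
        return reducers[method](stack, axis=0)

    stack = np.random.rand(5, 16, 16)        # e.g. a 5-slice z-stack
    print(project(stack, "Variance").shape)  # (16, 16)
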
Code example #22
    def create_settings(self):
        super(RunStarDist, self).create_settings()

        self.model = Choice(
            text="Model",
            choices=MODEL_OPTIONS,
            value=GREY_1,
            doc="""\
StarDist comes with models for detecting nuclei. Alternatively, you can supply a custom-trained model 
generated outside of CellProfiler within Python. Custom models can be useful if working with unusual cell types.

The inbuilt fluorescent and DSB models expect greyscale images. The H&E model expects a color image as input (from 
brightfield). Custom models will require images of the type they were trained with. It should be noted that the 
models supplied with StarDist do not support 3D images, but it's possible to train and use your own.
""",
        )

        self.tile_image = Binary(
            text="Tile input image?",
            value=False,
            doc="""\
If enabled, the input image will be broken down into overlapping tiles. 
This can help to conserve memory when working with large images.

The image is split into a set number of vertical and horizontal tiles. 
The total number of tiles will be the result of multiplying the horizontal 
and vertical tile number.""",
        )

        self.n_tiles_x = Integer(text="Horizontal tiles",
                                 value=1,
                                 minval=1,
                                 doc="""\
Specify the number of tiles to break the image down into along the x-axis (horizontal)."""
                                 )

        self.n_tiles_y = Integer(text="Vertical tiles",
                                 value=1,
                                 minval=1,
                                 doc="""\
Specify the number of tiles to break the image down into along the y-axis (vertical)."""
                                 )

        self.save_probabilities = Binary(
            text="Save probability image?",
            value=False,
            doc="""
If enabled, the probability scores from the model will be recorded as a new image. 
Probability scales from 0-1, with 1 representing absolute certainty of a pixel being in a cell. 
You may want to use a custom threshold to manually generate objects.""",
        )

        self.probabilities_name = ImageName(
            "Name the probability image",
            "Probabilities",
            doc=
            "Enter the name you want to call the probability image produced by this module.",
        )

        self.model_directory = Directory("Model folder",
                                         doc=f"""\
*(Used only when using a custom pre-trained model)*

Select the folder containing your StarDist model. This should have the config, threshold and weights files 
exported after training.""")

        self.gpu_test = DoSomething(
            "",
            "Test GPU",
            self.do_check_gpu,
            doc=f"""\
Press this button to check whether a GPU is correctly configured.

If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the 
required dependencies are not installed. 
Make sure you followed the setup instructions here: https://www.tensorflow.org/install/gpu

If you don't have a GPU or it's not configured, StarDist will instead run on the CPU. 
This will be slower but should work on any system.
""",
        )
Code example #23
    def create_settings(self):
        """Create your settings by subclassing this function

        create_settings is called at the end of initialization.

        You should create the setting variables for your module here:
            # Ask the user for the input image
            self.image_name = ImageSubscriber(...)
            # Ask the user for the name of the output image
            self.output_image = ImageName(...)
            # Ask the user for a parameter
            self.smoothing_size = Float(...)
        """
        self.objects_or_image = Choice(
            "Display object or image measurements?",
            [OI_OBJECTS, OI_IMAGE],
            doc="""\
-  *%(OI_OBJECTS)s* displays measurements made on objects.
-  *%(OI_IMAGE)s* displays a single measurement made on an image.
""" % globals(),
        )

        self.objects_name = LabelSubscriber(
            "Select the input objects",
            "None",
            doc="""\
*(Used only when displaying object measurements)*

Choose the name of objects identified by some previous module (such as
**IdentifyPrimaryObjects** or **IdentifySecondaryObjects**).
""",
        )

        def object_fn():
            if self.objects_or_image == OI_OBJECTS:
                return self.objects_name.value
            else:
                return "Image"

        self.measurement = Measurement(
            "Measurement to display",
            object_fn,
            doc="""\
Choose the measurement to display. This will be a measurement made by
some previous module on either the whole image (if displaying a single
image measurement) or on the objects you selected.
""",
        )

        self.wants_image = Binary(
            "Display background image?",
            True,
            doc="""\
Choose whether or not to display the measurements on
a background image. Usually, you will want to see the image
context for the measurements, but it may be useful to save
just the overlay of the text measurements and composite the
overlay image and the original image later. Choose "Yes" to
display the measurements on top of a background image or "No"
to display the measurements on a black background.""",
        )

        self.image_name = ImageSubscriber(
            "Select the image on which to display the measurements",
            "None",
            doc="""\
Choose the image to be displayed behind the measurements.
This can be any image created or loaded by a previous module.
If you have chosen not to display the background image, the image
will only be used to determine the dimensions of the displayed image.""",
        )

        self.color_or_text = Choice(
            "Display mode",
            [CT_TEXT, CT_COLOR],
            doc="""\
*(Used only when displaying object measurements)*

Choose how to display the measurement information. If you choose
%(CT_TEXT)s, **DisplayDataOnImage** will display the numeric value on
top of each object. If you choose %(CT_COLOR)s, **DisplayDataOnImage**
will convert the image to grayscale, if necessary, and display the
portion of the image within each object using a hue that indicates the
measurement value relative to the other objects in the set using the
default color map.
""" % globals(),
        )

        self.colormap = Colormap(
            "Color map",
            doc="""\
*(Used only when displaying object measurements)*

This is the color map used as the color gradient for coloring the
objects by their measurement values. See `this page`_ for pictures
of the available colormaps.

.. _this page: http://matplotlib.org/users/colormaps.html
            """,
        )
        self.text_color = Color(
            "Text color",
            "red",
            doc=
            """This is the color that will be used when displaying the text.""",
        )

        self.display_image = ImageName(
            "Name the output image that has the measurements displayed",
            "DisplayImage",
            doc="""\
The name that will be given to the image with the measurements
superimposed. You can use this name to refer to the image in subsequent
modules (such as **SaveImages**).
""",
        )

        self.font_size = Integer(
            "Font size (points)",
            10,
            minval=1,
            doc="""Set the font size of the letters to be displayed.""",
        )

        self.decimals = Integer(
            "Number of decimals",
            2,
            minval=0,
            doc=
            """Set how many decimals to be displayed, for example 2 decimals for 0.01; 3 decimals for 0.001.""",
        )

        self.saved_image_contents = Choice(
            "Image elements to save",
            [E_IMAGE, E_FIGURE, E_AXES],
            doc="""\
This setting controls the level of annotation on the image:

-  *%(E_IMAGE)s:* Saves the image with the overlaid measurement
   annotations.
-  *%(E_AXES)s:* Adds axes with tick marks and image coordinates.
-  *%(E_FIGURE)s:* Adds a title and other decorations.
""" % globals(),
        )

        self.offset = Integer(
            "Annotation offset (in pixels)",
            0,
            doc="""\
Add a pixel offset to the measurement. Normally, the text is
placed at the object (or image) center, which can obscure relevant features of
the object. This setting adds a specified offset to the text, in a random
direction.""",
        )

        self.color_map_scale_choice = Choice(
            "Color map scale",
            [CMS_USE_MEASUREMENT_RANGE, CMS_MANUAL],
            doc="""\
*(Used only when displaying object measurements as a colormap)*

**DisplayDataOnImage** assigns a color to each object’s measurement
value from a colormap when in colormap-mode, mapping the value to a
color along the colormap’s continuum. This mapping has implicit upper
and lower bounds to its range which are the extremes of the colormap.
This setting determines whether the extremes are the minimum and
maximum values of the measurement from among the objects in the
current image or manually-entered extremes.

-  *%(CMS_USE_MEASUREMENT_RANGE)s:* Use the full range of colors to
   get the maximum contrast within the image.
-  *%(CMS_MANUAL)s:* Manually set the upper and lower bounds so that
   images with different maxima and minima can be compared by a uniform
   color mapping.
""" % globals(),
        )
        self.color_map_scale = FloatRange(
            "Color map range",
            value=(0.0, 1.0),
            doc="""\
*(Used only when setting a manual colormap range)*

This setting determines the lower and upper bounds of the values for the
color map.
""",
        )
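
The color map scale setting above decides which measurement values map to the two ends of the colormap: the per-image minimum and maximum, or a fixed manual range so that different images are comparable. A tiny sketch of that scaling using a plain blue-to-red gradient in place of a real colormap; entirely illustrative.

    import numpy as np

    def values_to_colors(values, low_color=(0, 0, 1), high_color=(1, 0, 0), manual_range=None):
        """Map measurement values to colors along a simple blue-to-red gradient."""
        values = np.asarray(values, dtype=float)
        lo, hi = manual_range if manual_range is not None else (values.min(), values.max())
        normed = np.clip((values - lo) / (hi - lo), 0.0, 1.0)[:, None]
        return (1 - normed) * np.asarray(low_color) + normed * np.asarray(high_color)

    # Same values, different scaling: per-image range vs. a fixed manual range.
    vals = [0.2, 0.4, 0.6]
    print(values_to_colors(vals))                       # spans the whole gradient
    print(values_to_colors(vals, manual_range=(0, 1)))  # comparable across images
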
Code example #24
    def add_image(self, can_delete=True):
        """Add an image and its settings to the list of images"""
        image_name = ImageSubscriber(
            "Select the input image", "None", doc="Select the image to be corrected."
        )

        corrected_image_name = ImageName(
            "Name the output image",
            "CorrBlue",
            doc="Enter a name for the corrected image.",
        )

        illum_correct_function_image_name = ImageSubscriber(
            "Select the illumination function",
            "None",
            doc="""\
Select the illumination correction function image that will be used to
carry out the correction. This image is usually produced by another
module or loaded as a .mat or .npy format image using the **Images** module
or a **LoadData** module.

Note that loading .mat format images is deprecated and will be removed in
a future version of CellProfiler. You can export .mat format images as
.npy format images using **SaveImages** to ensure future compatibility.
""",
        )

        divide_or_subtract = Choice(
            "Select how the illumination function is applied",
            [DOS_DIVIDE, DOS_SUBTRACT],
            doc="""\
This choice depends on how the illumination function was calculated and
on your physical model of the way illumination variation affects the
background of images relative to the objects in images; it is also
somewhat empirical.

-  *%(DOS_SUBTRACT)s:* Use this option if the background signal is
   significant relative to the real signal coming from the cells. If you
   created the illumination correction function using
   *Background*, then you will want to choose
   *%(DOS_SUBTRACT)s* here.
-  *%(DOS_DIVIDE)s:* Choose this option if the signal to background
   ratio is high (the cells are stained very strongly). If you created
   the illumination correction function using *Regular*, then
   you will want to choose *%(DOS_DIVIDE)s* here.
"""
            % globals(),
        )

        image_settings = SettingsGroup()
        image_settings.append("image_name", image_name)
        image_settings.append("corrected_image_name", corrected_image_name)
        image_settings.append(
            "illum_correct_function_image_name", illum_correct_function_image_name
        )
        image_settings.append("divide_or_subtract", divide_or_subtract)
        image_settings.append("rescale_option", RE_NONE)

        if can_delete:
            image_settings.append(
                "remover",
                RemoveSettingButton(
                    "", "Remove this image", self.images, image_settings
                ),
            )
        image_settings.append("divider", Divider())
        self.images.append(image_settings)
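
Applying an illumination function by division or subtraction, as described above, is a single per-pixel operation either way. A hedged sketch; the zero-division guard and the clipping of negative results are illustrative choices, not necessarily what the module does.

    import numpy as np

    def apply_illumination(image, illum, method="Divide"):
        """Apply an illumination correction function by division or subtraction."""
        image = image.astype(float)
        illum = illum.astype(float)
        if method == "Divide":
            return image / np.where(illum == 0, 1.0, illum)  # guard against division by zero
        return np.clip(image - illum, 0.0, None)             # "Subtract": no negative intensities

    img = np.full((4, 4), 0.5)
    illum = np.linspace(1.0, 2.0, 16).reshape(4, 4)          # brighter toward one corner
    print(apply_illumination(img, illum, "Divide"))
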
Code example #25
    def create_settings(self):
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="""Select the image whose edges you want to enhance.""",
        )

        self.output_image_name = ImageName(
            "Name the output image",
            "EdgedImage",
            doc="""Enter a name for the resulting image with edges enhanced.""",
        )

        self.method = Choice(
            "Select an edge-finding method",
            [M_SOBEL, M_PREWITT, M_ROBERTS, M_LOG, M_CANNY, M_KIRSCH],
            doc="""\
There are several methods that can be used to enhance edges. Often, it
is best to test them against each other empirically:

-  *%(M_SOBEL)s:* Finds edges using the %(M_SOBEL)s approximation to
   the derivative. The %(M_SOBEL)s method derives a horizontal and
   vertical gradient measure and returns the square-root of the sum of
   the two squared signals.
-  *%(M_PREWITT)s:* Finds edges using the %(M_PREWITT)s approximation
   to the derivative. It returns edges at those points where the
   gradient of the image is maximum.
-  *%(M_ROBERTS)s:* Finds edges using the Roberts approximation to the
   derivative. The %(M_ROBERTS)s method looks for gradients in the
   diagonal and anti-diagonal directions and returns the square-root of
   the sum of the two squared signals. This method is fast, but it
   creates diagonal artifacts that may need to be removed by smoothing.
-  *%(M_LOG)s:* Applies a Laplacian of Gaussian filter to the image and
   finds zero crossings.
-  *%(M_CANNY)s:* Finds edges by looking for local maxima of the
   gradient of the image. The gradient is calculated using the
   derivative of a Gaussian filter. The method uses two thresholds to
   detect strong and weak edges, and includes the weak edges in the
   output only if they are connected to strong edges. This method is
   therefore less likely than the others to be fooled by noise, and more
   likely to detect true weak edges.
-  *%(M_KIRSCH)s:* Finds edges by calculating the gradient among the 8
   compass points (North, North-east, etc.) and selecting the maximum as
   the pixel’s value.
""" % globals(),
        )

        self.wants_automatic_threshold = Binary(
            "Automatically calculate the threshold?",
            True,
            doc="""\
*(Used only with the "%(M_CANNY)s" option and automatic thresholding)*

Select *Yes* to automatically calculate the threshold using a
three-category Otsu algorithm performed on the Sobel transform of the
image.

Select *No* to manually enter the threshold value.
""" % globals(),
        )

        self.manual_threshold = Float(
            "Absolute threshold",
            0.2,
            0,
            1,
            doc="""\
*(Used only with the "%(M_CANNY)s" option and manual thresholding)*

The upper cutoff for Canny edges. All Sobel-transformed pixels with this
value or higher will be marked as an edge. You can enter a threshold
between 0 and 1.
""" % globals(),
        )

        self.threshold_adjustment_factor = Float(
            "Threshold adjustment factor",
            1,
            doc="""\
*(Used only with the "%(M_CANNY)s" option and automatic thresholding)*

This threshold adjustment factor is a multiplier that is applied to both
the lower and upper Canny thresholds if they are calculated
automatically. An adjustment factor of 1 indicates no adjustment. The
adjustment factor has no effect on any threshold entered manually.
""" % globals(),
        )

        self.direction = Choice(
            "Select edge direction to enhance",
            [E_ALL, E_HORIZONTAL, E_VERTICAL],
            doc="""\
*(Used only with "%(M_PREWITT)s" and "%(M_SOBEL)s" methods)*

Select the direction of the edges you aim to identify in the image
(predominantly horizontal, predominantly vertical, or both).
""" % globals(),
        )

        self.wants_automatic_sigma = Binary(
            "Calculate Gaussian's sigma automatically?",
            True,
            doc="""\
Select *Yes* to automatically calculate the Gaussian's sigma.

Select *No* to manually enter the value.
""" % globals(),
        )

        self.sigma = Float("Gaussian's sigma value",
                           10,
                           doc="""Set a value for Gaussian's sigma.""")

        self.wants_automatic_low_threshold = Binary(
            "Calculate value for low threshold automatically?",
            True,
            doc="""\
*(Used only with the "%(M_CANNY)s" option and automatic thresholding)*

Select *Yes* to automatically calculate the low / soft threshold
cutoff for the %(M_CANNY)s method.

Select *No* to manually enter the low threshold value.
""" % globals(),
        )

        self.low_threshold = Float(
            "Low threshold value",
            0.1,
            0,
            1,
            doc="""\
*(Used only with the "%(M_CANNY)s" option and manual thresholding)*

Enter the soft threshold cutoff for the %(M_CANNY)s method. The
%(M_CANNY)s method will mark all %(M_SOBEL)s-transformed pixels with
values below this threshold as not being edges.
""" % globals(),
        )
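
The Sobel description above (horizontal and vertical gradients combined as the square root of the sum of their squares) maps directly onto SciPy's sobel filter. A short sketch of that gradient-magnitude computation; an illustration, not the module's own code.

    import numpy as np
    from scipy.ndimage import sobel

    def sobel_edges(image):
        """Gradient magnitude from horizontal and vertical Sobel responses."""
        image = image.astype(float)
        gx = sobel(image, axis=1)      # horizontal gradient
        gy = sobel(image, axis=0)      # vertical gradient
        return np.hypot(gx, gy)        # sqrt(gx**2 + gy**2)

    image = np.zeros((8, 8))
    image[:, 4:] = 1.0                 # a vertical step edge
    print(sobel_edges(image)[4])       # strongest response along the edge
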
Code example #26
    def create_settings(self):
        """Create the settings here and set the module name (initialization)

        """
        self.source_choice = Choice(
            "Use objects or an image as a mask?",
            [IO_OBJECTS, IO_IMAGE],
            doc="""\
You can mask an image in two ways:

-  *%(IO_OBJECTS)s*: Using objects created by another module (for
   instance **IdentifyPrimaryObjects**). The module will mask out all
   parts of the image that are not within one of the objects (unless you
   invert the mask).
-  *%(IO_IMAGE)s*: Using a binary image as the mask, where black
   portions of the image (false or zero-value pixels) will be masked
   out. If the image is not binary, the module will use all pixels whose
   intensity is greater than 0.5 as the mask’s foreground (white area).
   You can use **Threshold** instead to create a binary image with
   finer control over the intensity choice.
   """ % globals(),
        )

        self.object_name = LabelSubscriber(
            "Select object for mask",
            "None",
            doc="""\
*(Used only if mask is to be made from objects)*

Select the objects you would like to use to mask the input image.
""",
        )

        self.masking_image_name = ImageSubscriber(
            "Select image for mask",
            "None",
            doc="""\
*(Used only if mask is to be made from an image)*

Select the image that you would like to use to mask the input image.
""",
        )

        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="Select the image that you want to mask.",
        )

        self.masked_image_name = ImageName(
            "Name the output image",
            "MaskBlue",
            doc="Enter the name for the output masked image.",
        )

        self.invert_mask = Binary(
            "Invert the mask?",
            False,
            doc="""\
This option reverses the foreground/background relationship of the mask.

-  Select "*No*" to produce the mask from the foreground (white
   portion) of the masking image or the area within the masking objects.
-  Select "*Yes*" to instead produce the mask from the *background*
   (black portions) of the masking image or the area *outside* the
   masking objects.
       """ % globals(),
        )
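
When the mask comes from an image, the docs above say that pixels with intensity greater than 0.5 form the foreground, optionally inverted. A minimal sketch that builds such a mask and zeroes out everything outside it; CellProfiler also tracks the mask alongside the image, so this only illustrates the pixel arithmetic.

    import numpy as np

    def make_mask(masking_image, invert=False):
        """Foreground mask from a (possibly non-binary) masking image."""
        mask = masking_image > 0.5      # pixels above 0.5 count as foreground
        return ~mask if invert else mask

    def mask_image(image, mask):
        """Zero out everything outside the mask."""
        return np.where(mask, image, 0.0)

    img = np.random.rand(4, 4)
    masking = np.zeros((4, 4))
    masking[1:3, 1:3] = 1.0
    print(mask_image(img, make_mask(masking)))
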
Code example #27
    def create_settings(self):
        """Create the settings for the module

        Create the settings for the module during initialization.
        """
        self.contrast_choice = Choice(
            "Make each classification decision on how many measurements?",
            [BY_SINGLE_MEASUREMENT, BY_TWO_MEASUREMENTS],
            doc="""\
This setting controls how many measurements are used to make a
classification decision for each object:

-  *%(BY_SINGLE_MEASUREMENT)s:* Classifies each object based on a
   single measurement.
-  *%(BY_TWO_MEASUREMENTS)s:* Classifies each object based on a pair
   of measurements taken together (that is, an object must meet two
   criteria to belong to a class).
""" % globals(),
        )

        ############### Single measurement settings ##################
        #
        # A list holding groupings for each of the single measurements
        # to be done
        #
        self.single_measurements = []
        #
        # A count of # of measurements
        #
        self.single_measurement_count = HiddenCount(self.single_measurements)
        #
        # Add one single measurement to start off
        #
        self.add_single_measurement(False)
        #
        # A button to press to get another measurement
        #
        self.add_measurement_button = DoSomething(
            "", "Add another classification", self.add_single_measurement)
        #
        ############### Two-measurement settings #####################
        #
        # The object for the contrasting method
        #
        self.object_name = LabelSubscriber(
            "Select the object name",
            "None",
            doc="""\
Choose the object that you want to measure from the list. This should be
an object created by a previous module such as
**IdentifyPrimaryObjects**, **IdentifySecondaryObjects**, **IdentifyTertiaryObjects**, or **Watershed**
""",
        )

        #
        # The two measurements for the contrasting method
        #
        def object_fn():
            return self.object_name.value

        self.first_measurement = Measurement(
            "Select the first measurement",
            object_fn,
            doc="""\
*(Used only if using a pair of measurements)*

Choose a measurement made on the above object. This is the first of two
measurements that will be contrasted together. The measurement should be
one made on the object in a prior module.
""",
        )

        self.first_threshold_method = Choice(
            "Method to select the cutoff",
            [TM_MEAN, TM_MEDIAN, TM_CUSTOM],
            doc="""\
*(Used only if using a pair of measurements)*

Objects are classified as being above or below a cutoff value for a
measurement. You can set this cutoff threshold in one of three ways:

-  *%(TM_MEAN)s*: At the mean of the measurement’s value for all
   objects in the image cycle.
-  *%(TM_MEDIAN)s*: At the median of the measurement’s value for all
   objects in the image set.
-  *%(TM_CUSTOM)s*: You specify a custom threshold value.
""" % globals(),
        )

        self.first_threshold = Float(
            "Enter the cutoff value",
            0.5,
            doc="""\
*(Used only if using a pair of measurements)*

This is the cutoff value separating objects in the two classes.""",
        )

        self.second_measurement = Measurement(
            "Select the second measurement",
            object_fn,
            doc="""\
*(Used only if using a pair of measurements)*

Select a measurement made on the above object. This is
the second of two measurements that will be contrasted together.
The measurement should be one made on the object in a prior
module.""",
        )

        self.second_threshold_method = Choice(
            "Method to select the cutoff",
            [TM_MEAN, TM_MEDIAN, TM_CUSTOM],
            doc="""\
*(Used only if using a pair of measurements)*

Objects are classified as being above or below a cutoff value for a
measurement. You can set this cutoff threshold in one of three ways:

-  *%(TM_MEAN)s:* At the mean of the measurement’s value for all
   objects in the image cycle.
-  *%(TM_MEDIAN)s:* At the median of the measurement’s value for all
   objects in the image cycle.
-  *%(TM_CUSTOM)s:* You specify a custom threshold value.
""" % globals(),
        )

        self.second_threshold = Float(
            "Enter the cutoff value",
            0.5,
            doc="""\
*(Used only if using a pair of measurements)*

This is the cutoff value separating objects in the two classes.""",
        )

        self.wants_custom_names = Binary(
            "Use custom names for the bins?",
            False,
            doc="""\
*(Used only if using a pair of measurements)*

Select "*Yes*" if you want to specify the names of each bin
measurement.

Select "*No*" to create names based on the measurements. For instance,
for “Intensity_MeanIntensity_Green” and
“Intensity_TotalIntensity_Blue”, the module generates measurements
such as
“Classify_Intensity_MeanIntensity_Green_High_Intensity_TotalIntensity_Blue_Low”.
""" % globals(),
        )

        self.low_low_custom_name = Alphanumeric(
            "Enter the low-low bin name",
            "low_low",
            doc="""\
*(Used only if using a pair of measurements)*

Name of the measurement for objects that fall below the threshold for
both measurements.
""",
        )

        self.low_high_custom_name = Alphanumeric(
            "Enter the low-high bin name",
            "low_high",
            doc="""\
*(Used only if using a pair of measurements)*

Name of the measurement for objects whose
first measurement is below threshold and whose second measurement
is above threshold.
""",
        )

        self.high_low_custom_name = Alphanumeric(
            "Enter the high-low bin name",
            "high_low",
            doc="""\
*(Used only if using a pair of measurements)*

Name of the measurement for objects whose
first measurement is above threshold and whose second measurement
is below threshold.""",
        )

        self.high_high_custom_name = Alphanumeric(
            "Enter the high-high bin name",
            "high_high",
            doc="""\
*(Used only if using a pair of measurements)*

Name of the measurement for objects that
are above the threshold for both measurements.""",
        )

        self.wants_image = Binary(
            "Retain an image of the classified objects?",
            False,
            doc="""\
Select "*Yes*" to retain the image of the objects color-coded
according to their classification, for use later in the pipeline (for
example, to be saved by a **SaveImages** module).
""" % globals(),
        )

        self.image_name = ImageName(
            "Enter the image name",
            "None",
            doc="""\
*(Used only if the classified object image is to be retained for later use in the pipeline)*

Enter the name to be given to the classified object image.""",
        )
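
The settings above drive the two-measurement classification: each object is compared against a cutoff (mean, median, or custom) for each of the two measurements, which yields four bins. Below is a minimal sketch of that decision logic, assuming NumPy arrays of per-object values; the function names and cutoff labels are illustrative only and are not part of CellProfiler.

import numpy as np


def cutoff(values, method, custom=0.5):
    # Pick the cutoff at the mean, the median, or a user-supplied value,
    # mirroring the three threshold methods described in the docstrings.
    if method == "mean":
        return np.mean(values)
    if method == "median":
        return np.median(values)
    return custom


def classify_two_measurements(m1, m2, method1="mean", method2="mean"):
    # Assign each object to one of four bins based on whether it falls
    # below or above the cutoff for each of the two measurements.
    t1 = cutoff(m1, method1)
    t2 = cutoff(m2, method2)
    names = np.array([["low_low", "low_high"], ["high_low", "high_high"]])
    return names[(m1 >= t1).astype(int), (m2 >= t2).astype(int)]


# Four objects, two intensity-like measurements per object.
m1 = np.array([0.1, 0.9, 0.2, 0.8])
m2 = np.array([0.3, 0.2, 0.7, 0.9])
print(classify_two_measurements(m1, m2))
# -> ['low_low' 'high_low' 'low_high' 'high_high']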
Code example #28
    def create_settings(self):
        self.scheme_choice = Choice(
            "Select a color scheme",
            [SCHEME_RGB, SCHEME_CMYK, SCHEME_STACK, SCHEME_COMPOSITE],
            doc="""\
This module can use one of four color schemes to combine images:

-  *%(SCHEME_RGB)s*: Each input image determines the intensity of one
   of the color channels: red, green, and blue.
-  *%(SCHEME_CMYK)s*: Three of the input images are combined to
   determine the colors (cyan, magenta, and yellow) and a fourth is used
   only for brightness. The cyan image adds equally to the green and
   blue intensities. The magenta image adds equally to the red and blue
   intensities. The yellow image adds equally to the red and green
   intensities.
-  *%(SCHEME_STACK)s*: The channels are stacked in the order listed,
   from top to bottom. An arbitrary number of channels is allowed.

   For example, you could create a 5-channel image by providing
   5 grayscale images. The first grayscale image you provide will fill
   the first channel, the second grayscale image you provide will fill
   the second channel, and so on.
-  *%(SCHEME_COMPOSITE)s*: A color is assigned to each grayscale image.
   Each grayscale image is converted to color by multiplying the
   intensity by the color and the resulting color images are added
   together. An arbitrary number of channels can be composited into a
   single color image.
"""
            % globals(),
        )

        self.wants_rescale = Binary(
            "Rescale intensity",
            True,
            doc="""\
Choose whether to rescale each channel individually to 
the range of 0-1. This prevents clipping of channels with intensity 
above 1 and can help to balance the brightness of the different channels. 
This option also ensures that channels occupy the full intensity range 
available, which is useful for displaying images in other software.

This rescaling is applied before any multiplication factors set in this 
module's options. Using a multiplication factor >1 would therefore result 
in clipping."""
        )

        # # # # # # # # # # # # # # # #
        #
        # RGB settings
        #
        # # # # # # # # # # # # # # # #
        self.red_image_name = ImageSubscriber(
            "Select the image to be colored red",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in red.
"""
            % globals(),
        )

        self.green_image_name = ImageSubscriber(
            "Select the image to be colored green",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in green.
"""
            % globals(),
        )

        self.blue_image_name = ImageSubscriber(
            "Select the image to be colored blue",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in blue.
"""
            % globals(),
        )

        self.rgb_image_name = ImageName(
            "Name the output image",
            "ColorImage",
            doc="""Enter a name for the resulting image.""",
        )

        self.red_adjustment_factor = Float(
            "Relative weight for the red image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the red image. If all relative weights are
equal, all three colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.green_adjustment_factor = Float(
            "Relative weight for the green image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the green image. If all relative weights
are equal, all three colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.blue_adjustment_factor = Float(
            "Relative weight for the blue image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the blue image. If all relative weights
are equal, all three colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )
        # # # # # # # # # # # # # #
        #
        # CMYK settings
        #
        # # # # # # # # # # # # # #
        self.cyan_image_name = ImageSubscriber(
            "Select the image to be colored cyan",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in cyan.
"""
            % globals(),
        )

        self.magenta_image_name = ImageSubscriber(
            "Select the image to be colored magenta",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in magenta.
"""
            % globals(),
        )

        self.yellow_image_name = ImageSubscriber(
            "Select the image to be colored yellow",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in yellow.
"""
            % globals(),
        )

        self.gray_image_name = ImageSubscriber(
            "Select the image that determines brightness",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image that will determine each pixel's brightness.
"""
            % globals(),
        )

        self.cyan_adjustment_factor = Float(
            "Relative weight for the cyan image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the cyan image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.magenta_adjustment_factor = Float(
            "Relative weight for the magenta image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the magenta image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.yellow_adjustment_factor = Float(
            "Relative weight for the yellow image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the yellow image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.gray_adjustment_factor = Float(
            "Relative weight for the brightness image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the brightness image. If all relative
weights are equal, all colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        # # # # # # # # # # # # # #
        #
        # Stack settings
        #
        # # # # # # # # # # # # # #

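        # Per-channel setting groups for the stack scheme: the HiddenCount
        # lets a saved pipeline restore the right number of channels, and one
        # channel is added up front so the list never starts empty.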
        self.stack_channels = []
        self.stack_channel_count = HiddenCount(self.stack_channels)
        self.add_stack_channel_cb(can_remove=False)
        self.add_stack_channel = DoSomething(
            "Add another channel",
            "Add another channel",
            self.add_stack_channel_cb,
            doc="""\
Press this button to add another image to the stack.
""",
        )
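
The docstrings above describe how the inputs are mixed: each RGB input fills one channel, each CMYK input feeds two channels plus a brightness image, a composite tints each grayscale image with a color and sums the results, and optional rescaling to [0, 1] happens before the per-channel weights. The sketch below illustrates those rules under the assumption of NumPy arrays in [0, 1]; the helper names and the exact normalization are illustrative and not CellProfiler's implementation.

import numpy as np


def rescale(channel):
    # Stretch one channel to [0, 1]; the module applies this before any
    # relative weights, so weights > 1 can clip.
    rng = channel.max() - channel.min()
    return (channel - channel.min()) / rng if rng > 0 else np.zeros_like(channel)


def combine_rgb(red, green, blue, weights=(1.0, 1.0, 1.0), do_rescale=True):
    # Each input fills one output channel, scaled by its relative weight.
    channels = [red, green, blue]
    if do_rescale:
        channels = [rescale(c) for c in channels]
    return np.dstack([w * c for w, c in zip(weights, channels)])


def combine_cmyk(cyan, magenta, yellow, gray, weights=(1.0, 1.0, 1.0, 1.0)):
    # Cyan feeds green and blue, magenta feeds red and blue, yellow feeds
    # red and green, and the gray image adds equally to all three channels.
    c, m, y, k = (w * img for w, img in zip(weights, (cyan, magenta, yellow, gray)))
    rgb = np.dstack([m + y + k, c + y + k, c + m + k])
    return np.clip(rgb, 0, 1)


def composite(images, colors, weights=None):
    # Composite-style mix: tint each grayscale image with its assigned color
    # and sum the resulting color images.
    weights = weights if weights is not None else [1.0] * len(images)
    out = sum(w * img[..., None] * np.asarray(color, float)
              for img, color, w in zip(images, colors, weights))
    return np.clip(out, 0, 1)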
Code example #29
    def create_settings(self):
        """Create the UI settings for the module"""
        self.seed_objects_name = LabelSubscriber(
            "Select the seed objects",
            "None",
            doc="""\
Select the previously identified objects that you want to use as the
seeds for measuring branches and distances. Branches and trunks are assigned
per seed object. Seed objects are typically not single points/pixels but
instead are usually objects of varying sizes.""",
        )

        self.image_name = ImageSubscriber(
            "Select the skeletonized image",
            "None",
            doc="""\
Select the skeletonized image of the dendrites and/or axons as produced
by the **Morph** module’s *Skel* operation.""",
        )

        self.wants_branchpoint_image = Binary(
            "Retain the branchpoint image?",
            False,
            doc="""\
Select "*Yes*" if you want to save the color image of branchpoints and
trunks. This is the image that is displayed in the output window for
this module."""
            % globals(),
        )

        self.branchpoint_image_name = ImageName(
            "Name the branchpoint image",
            "BranchpointImage",
            doc="""\
*(Used only if a branchpoint image is to be retained)*

Enter a name for the branchpoint image here. You can then use this image
in a later module, such as **SaveImages**.""",
        )

        self.wants_to_fill_holes = Binary(
            "Fill small holes?",
            True,
            doc="""\
The algorithm reskeletonizes the image and this can leave artifacts
caused by small holes in the image prior to skeletonizing. These holes
result in false trunks and branchpoints. Select "*Yes*" to fill in
these small holes prior to skeletonizing."""
            % globals(),
        )

        self.maximum_hole_size = Integer(
            "Maximum hole size",
            10,
            minval=1,
            doc="""\
*(Used only when filling small holes)*

This is the area of the largest hole to fill, measured in pixels. The
algorithm will fill in any hole whose area is this size or smaller.""",
        )

        self.wants_objskeleton_graph = Binary(
            "Export the skeleton graph relationships?",
            False,
            doc="""\
Select "*Yes*" to produce an edge file and a vertex file that gives the
relationships between vertices (trunks, branchpoints and endpoints)."""
            % globals(),
        )

        self.intensity_image_name = ImageSubscriber(
            "Intensity image",
            "None",
            doc="""\
Select the image to be used to calculate
the total intensity along the edges between the vertices (trunks, branchpoints, and endpoints).""",
        )

        self.directory = Directory(
            "File output directory",
            doc="Select the directory you want to save the graph relationships to.",
            dir_choices=[
                DEFAULT_OUTPUT_FOLDER_NAME,
                DEFAULT_INPUT_FOLDER_NAME,
                ABSOLUTE_FOLDER_NAME,
                DEFAULT_OUTPUT_SUBFOLDER_NAME,
                DEFAULT_INPUT_SUBFOLDER_NAME,
            ],
        )
        self.directory.dir_choice = DEFAULT_OUTPUT_FOLDER_NAME

        self.vertex_file_name = Text(
            "Vertex file name",
            "vertices.csv",
            doc="""\
*(Used only when exporting graph relationships)*

Enter the name of the file that will hold the vertex information. You can
use metadata tags in the file name.

Each line of the file is a row of comma-separated values. The first
row is the header; this names the file’s columns. Each subsequent row
represents a vertex in the skeleton graph: either a trunk, a
branchpoint or an endpoint. The file has the following columns:

-  *image\_number:* The image number of the associated image.
-  *vertex\_number:* The number of the vertex within the image.
-  *i:* The I coordinate of the vertex.
-  *j:* The J coordinate of the vertex.
-  *label:* The label of the seed object associated with the vertex.
-  *kind:* The vertex type, with the following choices:

   -  **T:** Trunk
   -  **B:** Branchpoint
   -  **E:** Endpoint
""",
        )

        self.edge_file_name = Text(
            "Edge file name",
            "edges.csv",
            doc="""\
*(Used only when exporting graph relationships)*

Enter the name of the file that will hold the edge information. You can
use metadata tags in the file name. Each line of the file is a row of
comma-separated values. The first row is the header; this names the
file’s columns. Each subsequent row represents an edge or connection
between two vertices (including between a vertex and itself for certain
loops). Note that vertices include trunks, branchpoints, and endpoints.

The file has the following columns:

-  *image\_number:* The image number of the associated image.
-  *v1:* The zero-based index into the vertex table of the first vertex
   in the edge.
-  *v2:* The zero-based index into the vertex table of the second vertex
   in the edge.
-  *length:* The number of pixels in the path connecting the two
   vertices, including both vertex pixels.
-  *total\_intensity:* The sum of the intensities of the pixels in the
   edge, including both vertex pixel intensities.
""",
        )
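
The two file-name settings above spell out the CSV layout for the exported graph: a vertex table (image_number, vertex_number, i, j, label, kind) and an edge table (image_number, v1, v2, length, total_intensity). The sketch below writes files in that layout; the records are made up and the helper name write_graph is illustrative only.

import csv

VERTEX_COLUMNS = ["image_number", "vertex_number", "i", "j", "label", "kind"]
EDGE_COLUMNS = ["image_number", "v1", "v2", "length", "total_intensity"]


def write_graph(vertex_path, edge_path, vertices, edges):
    # One header row followed by one row per vertex / edge, as described in
    # the module documentation.
    with open(vertex_path, "w", newline="") as fd:
        writer = csv.DictWriter(fd, fieldnames=VERTEX_COLUMNS)
        writer.writeheader()
        writer.writerows(vertices)
    with open(edge_path, "w", newline="") as fd:
        writer = csv.DictWriter(fd, fieldnames=EDGE_COLUMNS)
        writer.writeheader()
        writer.writerows(edges)


# Example records: one trunk (T) and one endpoint (E) joined by a 25-pixel edge.
vertices = [
    {"image_number": 1, "vertex_number": 0, "i": 10, "j": 12, "label": 1, "kind": "T"},
    {"image_number": 1, "vertex_number": 1, "i": 30, "j": 18, "label": 1, "kind": "E"},
]
edges = [{"image_number": 1, "v1": 0, "v2": 1, "length": 25, "total_intensity": 4.7}]
write_graph("vertices.csv", "edges.csv", vertices, edges)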