コード例 #1
0
ファイル: calculatemath.py プロジェクト: mfs4rd/CellProfiler
            def __init__(self, index, operation):
                self.__index = index
                self.__operation = operation
                self.__operand_choice = Choice(
                    self.operand_choice_text(),
                    MC_ALL,
                    doc="""Indicate whether the operand is an image or object measurement.""",
                )

                self.__operand_objects = LabelName(
                    self.operand_objects_text(),
                    "None",
                    doc="""Choose the objects you want to measure for this operation.""",
                )

                self.__operand_measurement = Measurement(
                    self.operand_measurement_text(),
                    self.object_fn,
                    doc="""\
Enter the category that was used to create the measurement. You
will be prompted to add additional information depending on
the type of measurement that is requested.""",
                )

                self.__multiplicand = Float(
                    "Multiply the above operand by",
                    1,
                    doc="""Enter the number by which you would like to multiply the above operand.""",
                )

                self.__exponent = Float(
                    "Raise the power of above operand by",
                    1,
                    doc="""Enter the power by which you would like to raise the above operand.""",
                )
コード例 #2
0
 def test_01_02_set_value(self):
     """Setting value_text should update both the float value and the text."""
     setting = Float("foo", value=5)
     for text in ("6.00", "-1.75"):
         setting.value_text = text
         assert setting == float(text)
         assert setting.value_text == text
         setting.test_valid(None)
コード例 #3
0
def convert_java_type_to_setting(param_name, param_type, param_class):
    """
    Helper method to convert ImageJ/Java class parameter types to CellProfiler settings

    Parameters
    ----------
    param_name : str, required
        The name of the parameter
    param_type : str, required
        The Java class name describing the parameter type
    param_class: str, required
        One of {input_class} or {output_class}, based on the parameter use

    Returns
    ---------
    A new Setting of a type appropriate for param_type, named with param_name. Or None if no valid conversion exists.
    """
    # e.g. "class java.lang.Integer" -> "java.lang.Integer"
    type_string = param_type.split()[1]
    img_strings = ("ij.ImagePlus", "net.imagej.Dataset", "net.imagej.ImgPlus")
    if INPUT_CLASS == param_class:
        param_label = param_name
        if type_string == "java.lang.String":
            return Alphanumeric(param_label, "")
        elif type_string == "java.lang.Character":
            return Character(param_label, "")
        elif type_string == "java.lang.Integer":
            return Integer(param_label, 0, minval=-2 ** 31, maxval=((2 ** 31) - 1))
        elif type_string == "java.lang.Long":
            return Integer(param_label, 0, minval=-2 ** 63, maxval=((2 ** 63) - 1))
        elif type_string == "java.lang.Short":
            return Integer(param_label, 0, minval=-32768, maxval=32767)
        elif type_string == "java.lang.Byte":
            return Integer(param_label, 0, minval=-128, maxval=127)
        elif type_string == "java.lang.Boolean":
            return Boolean(param_label, 0)
        elif type_string == "java.lang.Float":
            return Float(param_label, minval=-2 ** 31, maxval=((2 ** 31) - 1))
        elif type_string == "java.lang.Double":
            return Float(param_label, minval=-2 ** 63, maxval=((2 ** 63) - 1))
        elif type_string == "java.io.File":
            return Filename(param_label, "")
        # BUG FIX: the original wrapped a generator expression in bool(), which
        # is always True (a generator object is truthy regardless of whether it
        # would yield anything), so every unrecognized type fell through to
        # ImageSubscriber. A plain membership test does what was intended.
        elif type_string in img_strings:
            return ImageSubscriber(param_label)
    elif OUTPUT_CLASS == param_class:
        # Same always-True bool(generator) bug fixed here as well.
        if type_string in img_strings:
            return ImageName(
                "[OUTPUT, " + type_string + "] " + param_name,
                param_name,
                doc="""
            You may use this setting to rename the indicated output variable, if desired.
            """,
            )

    return None
コード例 #4
0
    def create_settings(self):
        """Create the target mean and standard-deviation settings for rescaling."""
        #
        # The superclass (cellprofiler_core.module.ImageProcessing) defines two
        # settings for image input and output:
        #
        # -  x_name: an ImageSubscriber which "subscribes" to all
        #    ImageNameProviders in prior modules. Modules before yours will
        #    put images into CellProfiler. The ImageSubscriber gives
        #    your user a list of these images which can then be used as inputs
        #    in your module.
        # -  y_name: an text.ImageName makes the image available to subsequent
        #    modules.
        super(RescaleMeanSD, self).create_settings()

        #
        # reST help that gets displayed when the user presses the
        # help button to the right of the edit box.
        #
        # The superclass defines some generic help text. You can add
        # module-specific help text by modifying the setting's "doc"
        # string.
        #
        self.x_name.doc = """\
This is the image that the module operates on. You can choose any image
that is made available by a prior module.
**RescaleMeanSD** will do something to this image.
"""
        # We use a float setting so that the user can give us a number
        # for the scale. The control will turn red if the user types in
        # an invalid scale.
        #
        self.mean = Float(
            text="Mean",
            value=0.5,  # default target mean intensity after rescaling
            minval=0.0001,  # We don't let the user type in really small values
            maxval=1,  # or large values
            doc="""\
This is the average intensity the image will have after rescaling. Images will be scaled between 0 and 1.
""")
        # Same pattern for the target standard deviation.
        self.sd = Float(
            text="Standard Deviation",
            value=0.125,  # default target standard deviation after rescaling
            minval=0.0001,  # We don't let the user type in really small values
            maxval=0.5,  # or large values
            doc="""\
This is the standard deviation of intensity values after rescaling. Images will be scaled between 0 and 1.
""")
コード例 #5
0
    def create_settings(self):
        super(FindMaxima, self).create_settings()

        self.min_distance = Integer(
            text="Minimum distance between maxima",
            value=5,
            minval=0,
            doc="""Choose the minimum distance between accepted local maxima"""
        )

        self.exclude_mode = Choice("Method for excluding background",
                                   [MODE_THRESHOLD, MODE_MASK, MODE_OBJECTS],
                                   value="Threshold",
                                   doc=f"""\
By default, local maxima will be searched for across the whole image. This means that maxima will be found in 
areas that consist entirely of background. To resolve this we have several methods to exclude background.

**{MODE_THRESHOLD}** allows you to specify a minimum pixel intensity to be considered as a peak. Setting this to 0
effectively uses no threshold.

**{MODE_MASK}** will restrict peaks to areas which are within a provided mask image. This mask will typically come from 
the threshold module or another means of finding background.

**{MODE_OBJECTS}** will restrict peaks to areas within an existing set of objects.
""")

        self.min_intensity = Float("Specify the minimum intensity of a peak",
                                   0,
                                   minval=0,
                                   maxval=99,
                                   doc="""\
Intensity peaks below this threshold value will be excluded. Use this to ensure that your local 
maxima are within objects of interest.""")

        self.mask_image = ImageSubscriber(
            "Select the image to use as a mask",
            doc=
            "Select the image you want to use. This should be a binary image.")

        self.mask_objects = LabelSubscriber(
            "Select the objects to search within",
            doc="Select the objects within which to search for peaks.")

        self.maxima_color = Color(
            "Select maxima preview color",
            "Blue",
            doc="Maxima will be displayed in this color.",
        )

        self.maxima_size = Integer(
            "Select maxima preview size",
            value=1,
            minval=1,
            doc=
            "Size of the markers for each maxima in the preview. Positive pixels will be"
            "expanded by this radius."
            "You may want to increase this when working with large images.",
        )

        self.spacer = Divider(line=True)
コード例 #6
0
    def create_settings(self):
        """Create the object-selection and opacity settings for the overlay."""
        super(OverlayObjects, self).create_settings()

        # Rebrand the inherited input setting and document both endpoints.
        self.x_name.text = "Input"
        self.x_name.doc = "Objects will be overlaid on this image."
        self.y_name.doc = (
            "An RGB image with color-coded labels overlaid on a grayscale image."
        )

        # Object set whose labels are colorized onto the input image.
        self.objects = LabelSubscriber(
            doc="Color-coded labels of this object will be overlaid on the input image.",
            text="Objects",
        )

        # Blending factor for the label overlay (0 = invisible, 1 = opaque).
        self.opacity = Float(
            text="Opacity",
            minval=0.0,
            maxval=1.0,
            value=0.3,
            doc="""
            Opacity of overlaid labels. Increase this value to decrease the transparency of the colorized object
            labels.
            """,
        )
コード例 #7
0
    def add_channel(self, can_remove=True):
        """Add another channel to the channels list"""
        group = SettingsGroup()
        group.can_remove = can_remove
        group.append(
            "channel_choice",
            Integer(
                text="Channel number",
                value=len(self.channels) + 1,
                minval=1,
                doc="""\
*(Used only when splitting images)*

This setting chooses a channel to be processed. For example, *1*
is the first
channel in a .TIF or the red channel in a traditional image file.
*2* and *3* are the second and third channels of a TIF or
the green and blue channels in other formats. *4* is the
transparency channel for image formats that support transparency and is
channel # 4 for a .TIF file. **ColorToGray** will fail to process an
image if you select a channel that is not supported by that image, for
example, “5” for a three-channel .PNG file.""",
            ),
        )

        group.append(
            "contribution",
            Float(
                "Relative weight of the channel",
                1,
                0,
                doc="""\
*(Used only when combining channels)*

Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.""",
            ),
        )

        group.append(
            "image_name",
            ImageName(
                "Image name",
                value="Channel%d" % (len(self.channels) + 1),
                doc="""\
*(Used only when splitting images)*

Select the name of the output grayscale image.""",
            ),
        )

        if group.can_remove:
            group.append(
                "remover",
                RemoveSettingButton("", "Remove this channel", self.channels, group),
            )
        self.channels.append(group)
コード例 #8
0
    def create_settings(self):
        """Create the hole-size setting for the RemoveHoles module."""
        super(RemoveHoles, self).create_settings()

        # BUG FIX: the original doc string used a backslash line continuation
        # *inside* the literal, which embedded the source indentation into the
        # rendered help text ("for 3D            images").
        self.size = Float(
            text="Size of holes to fill",
            value=1.0,
            doc="Holes smaller than this diameter will be filled. Note that for 3D "
            "images this module operates volumetrically so diameters should be given in voxels",
        )
コード例 #9
0
    def create_settings(self):
        """Create the two phase settings and the beta coefficient setting."""
        super(RandomWalkerAlgorithm, self).create_settings()

        # Intensity level marking the image's first phase.
        self.first_phase = Float(
            text="First phase",
            value=0.5,
            doc="First phase demarcates an image’s first phase.",
        )

        # Intensity level marking the image's second phase.
        self.second_phase = Float(
            text="Second phase",
            value=0.5,
            doc="Second phase demarcates an image’s second phase.",
        )

        # Penalization coefficient controlling diffusion difficulty.
        self.beta = Float(
            text="Beta",
            value=130.0,
            doc="""
                Beta is the penalization coefficient for the random walker motion. Increasing the penalization
                coefficient increases the difficulty of the diffusion. Likewise, decreasing the penalization coefficient
                decreases the difficulty of the diffusion.
            """,
        )
コード例 #10
0
    def add_stack_channel_cb(self, can_remove=True):
        group = SettingsGroup()
        default_color = DEFAULT_COLORS[len(self.stack_channels) % len(DEFAULT_COLORS)]
        group.append(
            "image_name",
            ImageSubscriber(
                "Image name",
                "None",
                doc="""\
*(Used only if "%(SCHEME_STACK)s" or "%(SCHEME_COMPOSITE)s" is chosen)*

Select the input image to add to the stacked image.
"""
                % globals(),
            ),
        )
        group.append(
            "color",
            Color(
                "Color",
                default_color,
                doc="""\
*(Used only if "%(SCHEME_COMPOSITE)s" is chosen)*

The color to be assigned to the above image.
"""
                % globals(),
            ),
        )
        group.append(
            "weight",
            Float(
                "Weight",
                1.0,
                minval=0.5 / 255,
                doc="""\
*(Used only if "%(SCHEME_COMPOSITE)s" is chosen)*

The weighting of the above image relative to the others. The image’s
pixel values are multiplied by this weight before assigning the color.
"""
                % globals(),
            ),
        )

        if can_remove:
            group.append(
                "remover",
                RemoveSettingButton(
                    "", "Remove this image", self.stack_channels, group
                ),
            )
        self.stack_channels.append(group)
コード例 #11
0
    def create_settings(self):
        super(ResizeObjects, self).create_settings()

        self.method = Choice(
            "Method",
            ["Dimensions", "Factor", "Match Image"],
            doc="""\
The following options are available:

-  *Dimensions:* Enter the new height and width of the resized objects.
-  *Factor:* Enter a single value which specifies the scaling.""",
            value="Factor",
        )

        self.factor = Float(
            "Factor",
            0.25,
            minval=0,
            doc="""\
*(Used only if resizing by "Factor")*

Numbers less than 1 will shrink the objects; numbers greater than 1 will
enlarge the objects.""",
        )

        self.width = Integer(
            "Width",
            100,
            minval=1,
            doc="""\
*(Used only if resizing by "Dimensions")*

Enter the desired width of the final objects, in pixels.""",
        )

        self.height = Integer(
            "Height",
            100,
            minval=1,
            doc="""\
*(Used only if resizing by "Dimensions")*

Enter the desired height of the final objects, in pixels.""",
        )

        self.specific_image = ImageSubscriber(
            "Select the image with the desired dimensions",
            "None",
            doc="""\
        *(Used only if resizing by specifying desired final dimensions using an image)*

        The input object set will be resized to the dimensions of the specified image.""",
        )
コード例 #12
0
    def create_settings(self):
        """Create the settings that control the TrAM computation."""
        # choose the tracked objects to measure TrAM on
        self.object_name = LabelSubscriber(
                "Tracked objects", "None", doc="""
            Select the tracked objects for computing TrAM.""")

        # which measurements will go into the TrAM computation
        self.tram_measurements = MeasurementMultiChoiceForCategory(
            "TrAM measurements", category_chooser=self.object_name, doc="""
            These are measurements for the selected tracked objects which
            will be used in the TrAM computation. At least one must be selected.""")

        # Treat X-Y value pairs as isotropic in the TrAM measure?
        # NOTE(review): this setting uses the "cps." prefix while the siblings
        # use bare names — confirm both resolve to the same settings module.
        # BUG FIX below: "will be have" -> "will have" in the help text.
        self.isotropic = cps.Binary(
            'Isotropic XY metric?', True, doc="""
            If selected (the default) then measurements that are available
            as X-Y pairs (e.g. location) will have an isotropic
            metric applied in TrAM. Note that the X-Y-Z extension of this feature
            is not currently available.
            """)

        # number of spline knots
        # BUG FIX below: "indpendent" -> "independent" in the help text.
        self.num_knots = Integer(
            "Number of spline knots", 4, minval=self.MIN_NUM_KNOTS, doc="""
            The number of knots (independent values) used
            when computing smoothing splines. This should be around 1/5th the number
            of frames for reasonably oversampled time lapse sequences, and must be 3
            or greater. It is approximately the maximum number of wiggles expected in
            well-tracked trajectories
            """)

        # TrAM exponent
        self.tram_exponent = Float(
            "TrAM exponent", 0.5, minval=0.01, maxval=1.0, doc="""
            This number is between 0.01 and 1 (default 0.5), and specifies how
            strongly simultaneous sudden changes in multiple features synergize in
            the TrAM metric. A lower value signifies higher synergy (at the risk of
            missing tracking failures that are reflected in only some of the features).
            """)
コード例 #13
0
    def create_settings(self):
        """Create the gamma exponent setting."""
        super(GammaCorrection, self).create_settings()

        # Power-law exponent: < 1 compresses (darkens), > 1 expands (brightens).
        self.gamma = Float(
            text="Gamma",
            value=1.0,
            minval=0.0,
            maxval=100.0,
            doc="""
            A gamma value < 1 is an encoding gamma, and the process of
            encoding with this compressive power-law non-linearity, gamma
            compression, darkens images; conversely a gamma value > 1 is a
            decoding gamma and the application of the expansive power-law
            non-linearity, gamma expansion, brightens images.
            """,
        )
コード例 #14
0
    def create_settings(self):
        super(FillObjects, self).create_settings()

        self.size = Float(
            text="Minimum hole size",
            value=64.0,
            doc="Holes smaller than this diameter will be filled.",
        )

        self.planewise = Binary(
            text="Planewise fill",
            value=False,
            doc="""\
Select "*{YES}*" to fill objects on a per-plane level. 
This will perform the hole filling on each plane of a 
volumetric image, rather than on the image as a whole. 
This may be helpful for removing seed artifacts that 
are the result of segmentation.
**Note**: Planewise operations will be considerably slower.
""".format(**{"YES": "Yes"}),
        )
コード例 #15
0
    def create_settings(self):
        super(ReduceNoise, self).create_settings()

        self.size = Integer(
            text="Size", value=7, doc="Size of the patches to use for noise reduction."
        )

        self.distance = Integer(
            text="Distance",
            value=11,
            doc="Maximal distance in pixels to search for patches to use for denoising.",
        )

        self.cutoff_distance = Float(
            text="Cut-off distance",
            value=0.1,
            doc="""\
The permissiveness in accepting patches. Increasing the cut-off distance increases
the smoothness of the image. Likewise, decreasing the cut-off distance decreases the smoothness of the
image.
            """,
        )
コード例 #16
0
    def create_settings(self):
        #
        # The superclass (cellprofiler_core.module.ImageProcessing) defines two
        # settings for image input and output:
        #
        # -  x_name: an ImageSubscriber which "subscribes" to all
        #    ImageNameProviders in prior modules. Modules before yours will
        #    put images into CellProfiler. The ImageSubscriber gives
        #    your user a list of these images which can then be used as inputs
        #    in your module.
        # -  y_name: an text.ImageName makes the image available to subsequent
        #    modules.
        super(RescaleModePercentile, self).create_settings()

        #
        # reST help that gets displayed when the user presses the
        # help button to the right of the edit box.
        #
        # The superclass defines some generic help test. You can add
        # module-specific help text by modifying the setting's "doc"
        # string.
        #
        self.x_name.doc = """\
This is the image that the module operates on. You can choose any image
that is made available by a prior module.
**RescaleModePercentile** will do something to this image.
"""
        # We use a float setting so that the user can give us a number
        # for the scale. The control will turn red if the user types in
        # an invalid scale.
        #
        self.percent = Float(
            text="Percentile",
            value=99.99,  # The default value is 1 - a short-range scale
            minval=0.0001,  # We don't let the user type in really small values
            maxval=100,  # or large values
            doc="""\
This sets the image specific maxium intensity. A value of 99.99% means that the 99.99% intensity percentile is defined to have the intensity 1.
""")
コード例 #17
0
ファイル: fillobjects.py プロジェクト: cskv/CellProfiler
    def create_settings(self):
        super(FillObjects, self).create_settings()

        self.size = Float(
            text="Minimum hole size",
            value=64.0,
            doc="Holes smaller than this diameter will be filled.",
        )

        self.planewise = Binary(
            text="Planewise fill",
            value=False,
            doc="""\
Select "*{YES}*" to fill objects on a per-plane level. 
This will perform the hole filling on each plane of a 
volumetric image, rather than on the image as a whole. 
This may be helpful for removing seed artifacts that 
are the result of segmentation.
**Note**: Planewise operations will be considerably slower.
""".format(**{"YES": "Yes"}),
        )

        self.mode = Choice("Filling method", [MODE_HOLES, MODE_CHULL],
                           value=MODE_HOLES,
                           doc=f"""\
Choose the mode for hole filling.

In {MODE_HOLES} mode, the module will search for and fill holes entirely enclosed by
each object. Size of the holes to be removed can be controlled. 

In {MODE_CHULL} mode, the module will apply the convex hull of each object to fill 
missing pixels. This can be useful when round objects have partial holes that are 
not entirely enclosed.

Note: Convex hulls for each object are applied sequentially and may overlap. This means 
that touching objects may not be perfectly convex if there was a region of overlap. 
""")
コード例 #18
0
    def create_settings(self):
        """Create the settings that control this module"""
        self.object_name = LabelSubscriber(
            "Select objects to be masked",
            "None",
            doc="""\
Select the objects that will be masked (that is, excluded in whole or in
part based on the other settings in the module). You can choose from any
objects created by a previous object processing module, such as
**IdentifyPrimaryObjects**, **IdentifySecondaryObjects** or
**IdentifyTertiaryObjects**.
""",
        )

        self.remaining_objects = LabelName(
            "Name the masked objects",
            "MaskedNuclei",
            doc="""\
Enter a name for the objects that remain after
the masking operation. You can refer to the masked objects in
subsequent modules by this name.
""",
        )

        self.mask_choice = Choice(
            "Mask using a region defined by other objects or by binary image?",
            [MC_OBJECTS, MC_IMAGE],
            doc="""\
You can mask your objects by defining a region using objects you
previously identified in your pipeline (*%(MC_OBJECTS)s*) or by
defining a region based on the white regions in a binary image
previously loaded or created in your pipeline (*%(MC_IMAGE)s*).
"""
            % globals(),
        )

        self.masking_objects = LabelSubscriber(
            "Select the masking object",
            "None",
            doc="""\
*(Used only if mask is to be made from objects)*

Select the objects that will be used to define the masking region. You
can choose from any objects created by a previous object processing
module, such as **IdentifyPrimaryObjects**,
**IdentifySecondaryObjects**, or **IdentifyTertiaryObjects**.
""",
        )

        self.masking_image = ImageSubscriber(
            "Select the masking image",
            "None",
            doc="""\
*(Used only if mask is to be made from an image)*

Select an image that was either loaded or created by a previous module.
The image should be a binary image where the white portion of the image
is the region(s) you will use for masking. Binary images can be loaded
from disk using the **NamesAndTypes** module by selecting “Binary mask”
for the image type. You can also create a binary image from a grayscale
image using **ApplyThreshold**.
""",
        )

        self.wants_inverted_mask = Binary(
            "Invert the mask?",
            False,
            doc="""\
This option reverses the foreground/background relationship of the mask.

-  Select "*No*" for the mask to be composed of the foreground (white
   portion) of the masking image or the area within the masking objects.
-  Select "*Yes*" for the mask to instead be composed of the
   *background* (black portions) of the masking image or the area
   *outside* the masking objects.
   """
            % globals(),
        )

        self.overlap_choice = Choice(
            "Handling of objects that are partially masked",
            [P_MASK, P_KEEP, P_REMOVE, P_REMOVE_PERCENTAGE],
            doc="""\
An object might partially overlap the mask region, with pixels both
inside and outside the region. **MaskObjects** can handle this in one
of three ways:

-  *%(P_MASK)s:* Choosing this option will reduce the size of partially
   overlapping objects. The part of the object that overlaps the masking
   region will be retained. The part of the object that is outside of the
   masking region will be removed.
-  *%(P_KEEP)s:* If you choose this option, **MaskObjects** will keep
   the whole object if any part of it overlaps the masking region.
-  *%(P_REMOVE)s:* Objects that are partially outside of the masking
   region will be completely removed if you choose this option.
-  *%(P_REMOVE_PERCENTAGE)s:* Determine whether to remove or keep an
   object depending on how much of the object overlaps the masking
   region. **MaskObjects** will keep an object if at least a certain
   fraction (which you enter below) of the object falls within the
   masking region. **MaskObjects** completely removes the object if too
   little of it overlaps the masking region."""
            % globals(),
        )

        self.overlap_fraction = Float(
            "Fraction of object that must overlap",
            0.5,
            minval=0,
            maxval=1,
            doc="""\
*(Used only if removing based on overlap)*

Specify the minimum fraction of an object that must overlap the masking
region for that object to be retained. For instance, if the fraction is
0.75, then 3/4 of an object must be within the masking region for that
object to be retained.
""",
        )

        self.retain_or_renumber = Choice(
            "Numbering of resulting objects",
            [R_RENUMBER, R_RETAIN],
            doc="""\
Choose how to number the objects that remain after masking, which
controls how remaining objects are associated with their predecessors:

-  *%(R_RENUMBER)s:* The objects that remain will be renumbered using
   consecutive numbers. This is a good choice if you do not plan to use
   measurements from the original objects; your object measurements for
   the masked objects will not have gaps (where removed objects are
   missing).
-  *%(R_RETAIN)s:* The original labels for the objects will be
   retained. This allows any measurements you make from the masked
   objects to be directly aligned with measurements you might have made
   of the original, unmasked objects (or objects directly associated
   with them).
"""
            % globals(),
        )
コード例 #19
0
    def create_settings(self):
        self.scheme_choice = Choice(
            "Select a color scheme",
            [SCHEME_RGB, SCHEME_CMYK, SCHEME_STACK, SCHEME_COMPOSITE],
            doc="""\
This module can use one of two color schemes to combine images:

-  *%(SCHEME_RGB)s*: Each input image determines the intensity of one
   of the color channels: red, green, and blue.
-  *%(SCHEME_CMYK)s*: Three of the input images are combined to
   determine the colors (cyan, magenta, and yellow) and a fourth is used
   only for brightness. The cyan image adds equally to the green and
   blue intensities. The magenta image adds equally to the red and blue
   intensities. The yellow image adds equally to the red and green
   intensities.
-  *%(SCHEME_STACK)s*: The channels are stacked in the order listed,
   from top to bottom. An arbitrary number of channels is allowed.

   For example, you could create a 5-channel image by providing
   5 grayscale images. The first grayscale image you provide will fill
   the first channel, the second grayscale image you provide will fill
   the second channel, and so on.
-  *%(SCHEME_COMPOSITE)s*: A color is assigned to each grayscale image.
   Each grayscale image is converted to color by multiplying the
   intensity by the color and the resulting color images are added
   together. An arbitrary number of channels can be composited into a
   single color image.
"""
            % globals(),
        )

        self.wants_rescale = Binary(
            "Rescale intensity",
            True,
            doc="""\
Choose whether to rescale each channel individually to 
the range of 0-1. This prevents clipping of channels with intensity 
above 1 and can help to balance the brightness of the different channels. 
This option also ensures that channels occupy the full intensity range 
available, which is useful for displaying images in other software.

This rescaling is applied before any multiplication factors set in this 
module's options. Using a multiplication factor >1 would therefore result 
in clipping."""
        )

        # # # # # # # # # # # # # # # #
        #
        # RGB settings
        #
        # # # # # # # # # # # # # # # #
        self.red_image_name = ImageSubscriber(
            "Select the image to be colored red",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in red.
"""
            % globals(),
        )

        self.green_image_name = ImageSubscriber(
            "Select the image to be colored green",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in green.
"""
            % globals(),
        )

        self.blue_image_name = ImageSubscriber(
            "Select the image to be colored blue",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Select the input image to be displayed in blue.
"""
            % globals(),
        )

        self.rgb_image_name = ImageName(
            "Name the output image",
            "ColorImage",
            doc="""Enter a name for the resulting image.""",
        )

        self.red_adjustment_factor = Float(
            "Relative weight for the red image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the red image. If all relative weights are
equal, all three colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.green_adjustment_factor = Float(
            "Relative weight for the green image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the green image. If all relative weights
are equal, all three colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.blue_adjustment_factor = Float(
            "Relative weight for the blue image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_RGB)s" is selected as the color scheme)*

Enter the relative weight for the blue image. If all relative weights
are equal, all three colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )
        # # # # # # # # # # # # # #
        #
        # CYMK settings
        #
        # # # # # # # # # # # # # #
        self.cyan_image_name = ImageSubscriber(
            "Select the image to be colored cyan",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in cyan.
"""
            % globals(),
        )

        self.magenta_image_name = ImageSubscriber(
            "Select the image to be colored magenta",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in magenta.
"""
            % globals(),
        )

        self.yellow_image_name = ImageSubscriber(
            "Select the image to be colored yellow",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image to be displayed in yellow.
"""
            % globals(),
        )

        self.gray_image_name = ImageSubscriber(
            "Select the image that determines brightness",
            can_be_blank=True,
            blank_text=LEAVE_THIS_BLACK,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Select the input image that will determine each pixel's brightness.
"""
            % globals(),
        )

        self.cyan_adjustment_factor = Float(
            "Relative weight for the cyan image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the cyan image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.magenta_adjustment_factor = Float(
            "Relative weight for the magenta image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the magenta image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.yellow_adjustment_factor = Float(
            "Relative weight for the yellow image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the yellow image. If all relative weights
are equal, all colors contribute equally in the final image. To weight
colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        self.gray_adjustment_factor = Float(
            "Relative weight for the brightness image",
            value=1,
            minval=0,
            doc="""\
*(Used only if "%(SCHEME_CMYK)s" is selected as the color scheme)*

Enter the relative weight for the brightness image. If all relative
weights are equal, all colors contribute equally in the final image. To
weight colors relative to each other, increase or decrease the relative
weights.
"""
            % globals(),
        )

        # # # # # # # # # # # # # #
        #
        # Stack settings
        #
        # # # # # # # # # # # # # #

        self.stack_channels = []
        self.stack_channel_count = HiddenCount(self.stack_channels)
        self.add_stack_channel_cb(can_remove=False)
        self.add_stack_channel = DoSomething(
            "Add another channel",
            "Add another channel",
            self.add_stack_channel_cb,
            doc="""\
    Press this button to add another image to the stack.
    """,
        )
コード例 #20
0
    def create_settings(self):
        """Create the settings for the ImageMath module.

        Builds the list of per-image operand groups (two by default, via
        add_image) plus the module-wide settings: the operation to perform,
        the post-operation exponent/multiplier/addend, the clamping and
        NaN-handling toggles, the mask-handling option, the output image
        name, and the button for adding further operands.
        """
        # the list of per image settings (name & scaling factor)
        self.images = []
        # create the first two images (the default number)
        self.add_image(False)
        self.add_image(False)

        # other settings
        self.operation = Choice(
            "Operation",
            [
                O_ADD,
                O_SUBTRACT,
                O_DIFFERENCE,
                O_MULTIPLY,
                O_DIVIDE,
                O_AVERAGE,
                O_MINIMUM,
                O_MAXIMUM,
                O_INVERT,
                O_LOG_TRANSFORM,
                O_LOG_TRANSFORM_LEGACY,
                O_AND,
                O_OR,
                O_NOT,
                O_EQUALS,
                O_NONE,
            ],
            doc="""\
Select the operation to perform. Note that if more than two images are
chosen, then operations will be performed sequentially from first to
last, e.g., for “Divide”, (Image1 / Image2) / Image3

-  *%(O_ADD)s:* Adds the first image to the second, and so on.
-  *%(O_SUBTRACT)s:* Subtracts the second image from the first.
-  *%(O_DIFFERENCE)s:* The absolute value of the difference between the
   first and second images.
-  *%(O_MULTIPLY)s:* Multiplies the first image by the second.
-  *%(O_DIVIDE)s:* Divides the first image by the second.
-  *%(O_AVERAGE)s:* Calculates the mean intensity of the images loaded
   in the module. This is equivalent to the Add option divided by the
   number of images loaded by this module. If you would like to average
   all of the images in an entire pipeline, i.e., across cycles, you
   should instead use the **CorrectIlluminationCalculate** module and
   choose the *All* (vs. *Each*) option.
-  *%(O_MINIMUM)s:* Returns the element-wise minimum value at each
   pixel location.
-  *%(O_MAXIMUM)s:* Returns the element-wise maximum value at each
   pixel location.
-  *%(O_INVERT)s:* Subtracts the image intensities from 1. This makes
   the darkest color the brightest and vice-versa. Note that if a
   mask has been applied to the image, the mask will also be inverted.
-  *%(O_LOG_TRANSFORM)s:* Log transforms each pixel’s intensity. The
   actual function is log\ :sub:`2`\ (image + 1), transforming values
   from 0 to 1 into values from 0 to 1.
-  *%(O_LOG_TRANSFORM_LEGACY)s:* Log\ :sub:`2` transform for backwards
   compatibility.
-  *%(O_NONE)s:* This option is useful if you simply want to select some
   of the later options in the module, such as adding, multiplying, or
   exponentiating your image by a constant.

The following are operations that produce binary images. In a binary
image, the foreground has a truth value of “true” (ones) and the background has
a truth value of “false” (zeros). The operations, *%(O_OR)s, %(O_AND)s and
%(O_NOT)s* will convert the input images to binary by changing all zero
values to background (false) and all other values to foreground (true).

-  *%(O_AND)s:* a pixel in the output image is in the foreground only
   if all corresponding pixels in the input images are also in the
   foreground.
-  *%(O_OR)s:* a pixel in the output image is in the foreground if a
   corresponding pixel in any of the input images is also in the
   foreground.
-  *%(O_NOT)s:* the foreground of the input image becomes the
   background of the output image and vice-versa.
-  *%(O_EQUALS)s:* a pixel in the output image is in the foreground if
   the corresponding pixels in the input images have the same value.

Note that *%(O_INVERT)s*, *%(O_LOG_TRANSFORM)s*,
*%(O_LOG_TRANSFORM_LEGACY)s* and *%(O_NONE)s* operate on only a
single image.
""" % globals(),
        )
        self.divider_top = Divider(line=False)

        # Post-operation adjustments; each doc string states that it is
        # applied *after* the chosen operation.
        self.exponent = Float(
            "Raise the power of the result by",
            1,
            doc="""\
Enter an exponent to raise the result to *after* the chosen operation.""",
        )

        self.after_factor = Float(
            "Multiply the result by",
            1,
            doc="""\
Enter a factor to multiply the result by *after* the chosen operation.""",
        )

        self.addend = Float(
            "Add to result",
            0,
            doc="""\
Enter a number to add to the result *after* the chosen operation.""",
        )

        # Clamp the result into the [0, 1] range expected by other modules.
        self.truncate_low = Binary(
            "Set values less than 0 equal to 0?",
            True,
            doc="""\
Values outside the range 0 to 1 might not be handled well by other
modules. Select *Yes* to set negative values to 0.
""" % globals(),
        )

        self.truncate_high = Binary(
            "Set values greater than 1 equal to 1?",
            True,
            doc="""\
Values outside the range 0 to 1 might not be handled well by other
modules. Select *Yes* to set values greater than 1 to a maximum
value of 1.
""" % globals(),
        )

        # Replace NaN pixels (divide-by-zero etc.) with zero when enabled.
        self.replace_nan = Binary(
            "Replace invalid values with 0?",
            True,
            doc="""\
        Certain operations are mathematically invalid (divide by zero, 
        raise a negative number to the power of a fraction, etc.).
        This setting will set pixels with invalid values to zero.
        Disabling this setting will represent these pixels as "nan" 
        ("Not A Number"). "nan" pixels cannot be displayed properly and 
        may cause errors in other modules.
        """ % globals(),
        )

        self.ignore_mask = Binary(
            "Ignore the image masks?",
            False,
            doc="""\
Select *Yes* to set equal to zero all previously masked pixels and
operate on the masked images as if no mask had been applied. Otherwise,
the smallest image mask is applied after image math has been completed.
""" % globals(),
        )

        self.output_image_name = ImageName(
            "Name the output image",
            "ImageAfterMath",
            doc="""\
Enter a name for the resulting image.""",
        )

        # Button appends another operand group to self.images.
        self.add_button = DoSomething("", "Add another image", self.add_image)

        self.divider_bottom = Divider(line=False)
コード例 #21
0
    def add_image(self, removable=True):
        """Append one operand group to self.images.

        Each group holds: an image-or-measurement choice, the image
        subscriber, the measurement picker, and a pre-operation
        multiplication factor.

        removable: when True, the group also gets a "Remove this image"
            button so the user can delete it; the two default groups are
            created with removable=False.
        """
        # The text for these settings will be replaced in renumber_settings()
        group = SettingsGroup()
        group.removable = removable
        # Operand source: a whole image or a single image measurement.
        group.append(
            "image_or_measurement",
            Choice(
                "Image or measurement?",
                [IM_IMAGE, IM_MEASUREMENT],
                doc="""\
You can perform math operations using two images or you can use a
measurement for one of the operands. For instance, to divide the
intensity of one image by another, choose *%(IM_IMAGE)s* for both and
pick the respective images. To divide the intensity of an image by its
median intensity, use **MeasureImageIntensity** prior to this module to
calculate the median intensity, then select *%(IM_MEASUREMENT)s* and
use the median intensity measurement as the denominator.
""" % globals(),
            ),
        )

        group.append(
            "image_name",
            ImageSubscriber(
                "Select the image",
                "None",
                doc="""\
Select the image that you want to use for this operation.""",
            ),
        )

        # Only meaningful when image_or_measurement is IM_MEASUREMENT.
        group.append(
            "measurement",
            Measurement(
                "Measurement",
                lambda: "Image",
                "",
                doc="""\
Select a measurement made on the image. The value of the
measurement is used for the operand for all of the pixels of the
other operand's image.""",
            ),
        )

        # Per-operand scaling factor, applied before the main operation.
        group.append(
            "factor",
            Float(
                "Multiply the image by",
                1,
                doc="""\
Enter the number that you would like to multiply the above image by. This multiplication
is applied before other operations.""",
            ),
        )

        if removable:
            group.append(
                "remover",
                RemoveSettingButton("", "Remove this image", self.images,
                                    group),
            )

        group.append("divider", Divider())
        self.images.append(group)
コード例 #22
0
    def create_settings(self):
        """Create the settings for the module

        Create the settings for the module during initialization.
        """
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="""\
The name of a binary image from a previous module. **IdentifyDeadWorms**
will use this image to establish the foreground and background for the
fitting operation. You can use **ApplyThreshold** to threshold a
grayscale image and create the binary mask. You can also use a module
such as **IdentifyPrimaryObjects** to label each worm and then use
**ConvertObjectsToImage** to make the result a mask.
""",
        )

        self.object_name = LabelName(
            "Name the dead worm objects to be identified",
            "DeadWorms",
            doc="""\
This is the name for the dead worm objects. You can refer
to this name in subsequent modules such as
**IdentifySecondaryObjects**""",
        )

        self.worm_width = Integer(
            "Worm width",
            10,
            minval=1,
            doc="""\
This is the width (the short axis), measured in pixels,
of the diamond used as a template when
matching against the worm. It should be less than the width
of a worm.""",
        )

        self.worm_length = Integer(
            "Worm length",
            100,
            minval=1,
            doc="""\
This is the length (the long axis), measured in pixels,
of the diamond used as a template when matching against the
worm. It should be less than the length of a worm""",
        )

        self.angle_count = Integer(
            "Number of angles",
            32,
            minval=1,
            doc="""\
This is the number of different angles at which the template will be
tried. For instance, if there are 12 angles, the template will be
rotated by 0°, 15°, 30°, 45° … 165°. The shape is bilaterally symmetric;
that is, you will get the same shape after rotating it by 180°.
""",
        )

        self.wants_automatic_distance = Binary(
            "Automatically calculate distance parameters?",
            True,
            doc="""\
This setting determines whether or not **IdentifyDeadWorms**
automatically calculates the parameters used to determine whether two
found-worm centers belong to the same worm.

Select "*Yes*" to have **IdentifyDeadWorms** automatically calculate
the distance from the worm length and width. Select "*No*" to set the
distances manually.
"""
            % globals(),
        )

        self.space_distance = Float(
            "Spatial distance",
            5,
            minval=1,
            doc="""\
*(Used only if not automatically calculating distance parameters)*

Enter the distance for calculating the worm centers, in units of pixels.
The worm centers must be at least many pixels apart for the centers to
be considered two separate worms.
""",
        )

        self.angular_distance = Float(
            "Angular distance",
            30,
            minval=1,
            doc="""\
*(Used only if automatically calculating distance parameters)*

**IdentifyDeadWorms** calculates the worm centers at different angles.
Two worm centers are considered to represent different worms if their
angular distance is larger than this number. The number is measured in
degrees.
""",
        )
コード例 #23
0
    def create_settings(self):
        super(RunCellpose, self).create_settings()

        self.expected_diameter = Integer(
            text="Expected object diameter",
            value=15,
            minval=0,
            doc="""\
The average diameter of the objects to be detected. Setting this to 0 will attempt to automatically detect object size.
Note that automatic diameter mode does not work when running on 3D images.

Cellpose models come with a pre-defined object diameter. Your image will be resized during detection to attempt to 
match the diameter expected by the model. The default models have an expected diameter of ~16 pixels, if trying to 
detect much smaller objects it may be more efficient to resize the image first using the Resize module.
""",
        )

        self.mode = Choice(
            text="Detection mode",
            choices=[MODE_NUCLEI, MODE_CELLS, MODE_CUSTOM],
            value=MODE_NUCLEI,
            doc="""\
CellPose comes with models for detecting nuclei or cells. Alternatively, you can supply a custom-trained model 
generated using the command line or Cellpose GUI. Custom models can be useful if working with unusual cell types.
""",
        )

        self.use_gpu = Binary(text="Use GPU",
                              value=False,
                              doc=f"""\
If enabled, Cellpose will attempt to run detection on your system's graphics card (GPU). 
Note that you will need a CUDA-compatible GPU and correctly configured PyTorch version, see this link for details: 
{CUDA_LINK}

If disabled or incorrectly configured, Cellpose will run on your CPU instead. This is much slower but more compatible 
with different hardware setups.

Note that, particularly when in 3D mode, lack of GPU memory can become a limitation. If a model crashes you may need to 
re-start CellProfiler to release GPU memory. Resizing large images prior to running them through the model can free up 
GPU memory.

""")

        self.use_averaging = Binary(text="Use averaging",
                                    value=True,
                                    doc="""\
If enabled, CellPose will run it's 4 inbuilt models and take a consensus to determine the results. If disabled, only a 
single model will be called to produce results. Disabling averaging is faster to run but less accurate."""
                                    )

        self.supply_nuclei = Binary(text="Supply nuclei image as well?",
                                    value=False,
                                    doc="""
When detecting whole cells, you can provide a second image featuring a nuclear stain to assist 
the model with segmentation. This can help to split touching cells.""")

        self.nuclei_image = ImageSubscriber(
            "Select the nuclei image",
            doc="Select the image you want to use as the nuclear stain.")

        self.save_probabilities = Binary(
            text="Save probability image?",
            value=False,
            doc="""
If enabled, the probability scores from the model will be recorded as a new image. 
Probability >0 is considered as being part of a cell. 
You may want to use a higher threshold to manually generate objects.""",
        )

        self.probabilities_name = ImageName(
            "Name the probability image",
            "Probabilities",
            doc=
            "Enter the name you want to call the probability image produced by this module.",
        )

        self.model_directory = Directory(
            "Location of the pre-trained model file",
            doc=f"""\
*(Used only when using a custom pre-trained model)*

Select the location of the pre-trained CellPose model file that will be used for detection."""
        )

        def get_directory_fn():
            """Get the directory for the rules file name"""
            return self.model_directory.get_absolute_path()

        def set_directory_fn(path):
            dir_choice, custom_path = self.model_directory.get_parts_from_path(
                path)

            self.model_directory.join_parts(dir_choice, custom_path)

        self.model_file_name = Filename("Pre-trained model file name",
                                        "cyto_0",
                                        get_directory_fn=get_directory_fn,
                                        set_directory_fn=set_directory_fn,
                                        doc=f"""\
*(Used only when using a custom pre-trained model)*

This file can be generated by training a custom model withing the CellPose GUI or command line applications."""
                                        )

        self.gpu_test = DoSomething(
            "",
            "Test GPU",
            self.do_check_gpu,
            doc=f"""\
Press this button to check whether a GPU is correctly configured.

If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the 
required dependencies are not installed.

If you have multiple GPUs on your system, this button will only test the first one.
""",
        )

        self.flow_threshold = Float(
            text="Flow threshold",
            value=0.4,
            minval=0,
            doc=
            """Flow error threshold. All cells with errors below this threshold are kept. Recommended default is 0.4""",
        )

        self.dist_threshold = Float(
            text="Cell probability threshold",
            value=0.0,
            minval=0,
            doc=f"""\
Cell probability threshold (all pixels with probability above threshold kept for masks). Recommended default is 0.0. """,
        )
コード例 #24
0
    def add_measurement(self, flag_settings, can_delete=True):
        measurement_settings = flag_settings.measurement_settings

        group = SettingsGroup()
        group.append("divider1", Divider(line=False))
        group.append(
            "source_choice",
            Choice(
                "Flag is based on",
                S_ALL,
                doc="""\
-  *%(S_IMAGE)s:* A per-image measurement, such as intensity or
   granularity.
-  *%(S_AVERAGE_OBJECT)s:* The average of all object measurements in
   the image.
-  *%(S_ALL_OBJECTS)s:* All the object measurements in an image,
   without averaging. In other words, if *any* of the objects meet the
   criteria, the image will be flagged.
-  *%(S_RULES)s:* Use a text file of rules produced by CellProfiler
   Analyst. With this option, you will have to ensure that this pipeline
   produces every measurement in the rules file upstream of this module.
-  *%(S_CLASSIFIER)s:* Use a classifier built by CellProfiler Analyst.
""" % globals(),
            ),
        )

        group.append(
            "object_name",
            LabelSubscriber(
                "Select the object to be used for flagging",
                "None",
                doc="""\
*(Used only when flag is based on an object measurement)*

Select the objects whose measurements you want to use for flagging.
""",
            ),
        )

        def object_fn():
            if group.source_choice == S_IMAGE:
                return IMAGE
            return group.object_name.value

        group.append(
            "rules_directory",
            Directory(
                "Rules file location",
                doc="""\
*(Used only when flagging using "{rules}")*

Select the location of the rules file that will be used for flagging images.
{folder_choice}
""".format(rules=S_RULES, folder_choice=IO_FOLDER_CHOICE_HELP_TEXT),
            ),
        )

        def get_directory_fn():
            """Get the directory for the rules file name"""
            return group.rules_directory.get_absolute_path()

        def set_directory_fn(path):
            dir_choice, custom_path = group.rules_directory.get_parts_from_path(
                path)
            group.rules_directory.join_parts(dir_choice, custom_path)

        group.append(
            "rules_file_name",
            Filename(
                "Rules file name",
                "rules.txt",
                get_directory_fn=get_directory_fn,
                set_directory_fn=set_directory_fn,
                doc="""\
*(Used only when flagging using "%(S_RULES)s")*

The name of the rules file, most commonly from CellProfiler Analyst's
Classifier. This file should be a plain text file
containing the complete set of rules.

Each line of this file should be a rule naming a measurement to be made
on an image, for instance:

    IF (Image_ImageQuality_PowerLogLogSlope_DNA < -2.5, [0.79, -0.79], [-0.94, 0.94])

The above rule will score +0.79 for the positive category and -0.94
for the negative category for images whose power log slope is less
than -2.5 pixels and will score the opposite for images whose slope is
larger. The filter adds positive and negative and flags the images
whose positive score is higher than the negative score.
""" % globals(),
            ),
        )

        def get_rules_class_choices(group=group):
            """Get the available choices from the rules file"""
            try:
                if group.source_choice == S_CLASSIFIER:
                    return self.get_bin_labels(group)
                elif group.source_choice == S_RULES:
                    rules = self.get_rules(group)
                    nclasses = len(rules.rules[0].weights[0])
                    return [str(i) for i in range(1, nclasses + 1)]
                else:
                    return ["None"]
                rules = self.get_rules(group)
                nclasses = len(rules.rules[0].weights[0])
                return [str(i) for i in range(1, nclasses + 1)]
            except:
                return [str(i) for i in range(1, 3)]

        group.append(
            "rules_class",
            MultiChoice(
                "Class number",
                choices=["1", "2"],
                doc="""\
*(Used only when flagging using "%(S_RULES)s")*

Select which classes to flag when filtering. The CellProfiler Analyst
Classifier user interface lists the names of the classes in order. By
default, these are the positive (class 1) and negative (class 2)
classes. **FlagImage** uses the first class from CellProfiler Analyst
if you choose “1”, etc.

Please note the following:

-  The flag is set if the image falls into the selected class.
-  You can make multiple class selections. If you do so, the module will
   set the flag if the image falls into any of the selected classes.
""" % globals(),
            ),
        )

        group.rules_class.get_choices = get_rules_class_choices

        group.append(
            "measurement",
            Measurement(
                "Which measurement?",
                object_fn,
                doc="""Choose the measurement to be used as criteria.""",
            ),
        )

        group.append(
            "wants_minimum",
            Binary(
                "Flag images based on low values?",
                True,
                doc="""\
Select *Yes* to flag images with measurements below the specified
cutoff. If the measurement evaluates to Not-A-Number (NaN), then the
image is not flagged.
""" % globals(),
            ),
        )

        group.append(
            "minimum_value",
            Float("Minimum value", 0, doc="""Set a value as a lower limit."""),
        )

        group.append(
            "wants_maximum",
            Binary(
                "Flag images based on high values?",
                True,
                doc="""\
Select *Yes* to flag images with measurements above the specified
cutoff. If the measurement evaluates to Not-A-Number (NaN), then the
image is not flagged.
""" % globals(),
            ),
        )

        group.append(
            "maximum_value",
            Float("Maximum value", 1,
                  doc="""Set a value as an upper limit."""),
        )

        if can_delete:
            group.append(
                "remover",
                RemoveSettingButton("", "Remove this measurement",
                                    measurement_settings, group),
            )

        group.append("divider2", Divider(line=True))
        measurement_settings.append(group)
コード例 #25
0
ファイル: resize.py プロジェクト: yaweiyang-sz/CellProfiler
    def create_settings(self):
        """Create the settings for the Resize module.

        Extends the superclass settings with: the resizing method
        (factor vs. explicit dimensions), the factor, the manual-vs-image
        dimension source, the target width/height or reference image, the
        interpolation method, and the machinery for resizing additional
        images with the same parameters.
        """
        super(Resize, self).create_settings()

        self.size_method = Choice(
            "Resizing method",
            R_ALL,
            doc="""\
The following options are available:

-  *Resize by a fraction or multiple of the original size:* Enter a single value which specifies the scaling.
-  *Resize by specifying desired final dimensions:* Enter the new height and width of the resized image, in units of pixels.""",
        )

        # Used only for the fraction/multiple method.
        self.resizing_factor = Float(
            "Resizing factor",
            0.25,
            minval=0,
            doc="""\
*(Used only if resizing by a fraction or multiple of the original size)*

Numbers less than one (that is, fractions) will shrink the image;
numbers greater than one (that is, multiples) will enlarge the image.""",
        )

        self.use_manual_or_image = Choice(
            "Method to specify the dimensions",
            C_ALL,
            doc="""\
*(Used only if resizing by specifying the dimensions)*

You have two options on how to resize your image:

-  *{C_MANUAL}:* Specify the height and width of the output image.
-  *{C_IMAGE}:* Specify an image and the input image will be resized to the same dimensions.
            """.format(
                **{"C_IMAGE": C_IMAGE, "C_MANUAL": C_MANUAL}
            ),
        )

        # Explicit output dimensions, used with the manual option.
        self.specific_width = Integer(
            "Width of the final image",
            100,
            minval=1,
            doc="""\
*(Used only if resizing by specifying desired final dimensions)*

Enter the desired width of the final image, in pixels.""",
        )

        self.specific_height = Integer(
            "Height of the final image",
            100,
            minval=1,
            doc="""\
*(Used only if resizing by specifying desired final dimensions)*

Enter the desired height of the final image, in pixels.""",
        )

        # Reference image whose dimensions are copied, used with the
        # image option.
        self.specific_image = ImageSubscriber(
            "Select the image with the desired dimensions",
            "None",
            doc="""\
*(Used only if resizing by specifying desired final dimensions using an image)*

The input image will be resized to the dimensions of the specified image.""",
        )

        self.interpolation = Choice(
            "Interpolation method",
            I_ALL,
            doc="""\
-  *Nearest Neighbor:* Each output pixel is given the intensity of the
   nearest corresponding pixel in the input image.
-  *Bilinear:* Each output pixel is given the intensity of the weighted
   average of the 2x2 neighborhood at the corresponding position in the
   input image.
-  *Bicubic:* Each output pixel is given the intensity of the weighted
   average of the 4x4 neighborhood at the corresponding position in the
   input image.""",
        )

        self.separator = Divider(line=False)

        # Extra images resized with the same parameters; the hidden count
        # keeps the saved pipeline in sync with the list length.
        self.additional_images = []

        self.additional_image_count = HiddenCount(
            self.additional_images, "Additional image count"
        )

        self.add_button = DoSomething("", "Add another image", self.add_image)
Code example #26
0
    def create_settings(self):
        #
        # The superclass (ImageProcessing) defines two
        # settings for image input and output:
        #
        # -  x_name: an ImageNameSubscriber which "subscribes" to all
        #    ImageNameProviders in prior modules. Modules before yours will
        #    put images into CellProfiler. The ImageNameSubscriber gives
        #    your user a list of these images which can then be used as inputs
        #    in your module.
        # -  y_name: an ImageName makes the image available to subsequent
        #    modules.
        super(ImageTemplate, self).create_settings()

        #
        # reST help that gets displayed when the user presses the
        # help button to the right of the edit box.
        #
        # The superclass defines some generic help test. You can add
        # module-specific help text by modifying the setting's "doc"
        # string.
        #
        self.x_name.doc = """\
This is the image that the module operates on. You can choose any image
that is made available by a prior module.

**ImageTemplate** will do something to this image.
"""

        #
        # Here's a choice box - the user gets a drop-down list of what
        # can be done.
        #
        self.gradient_choice = Choice(
            text="Gradient choice:",
            # The choice takes a list of possibilities. The first one
            # is the default - the one the user will typically choose.
            choices=[
                GRADIENT_DIRECTION_X, GRADIENT_DIRECTION_Y, GRADIENT_MAGNITUDE
            ],
            # The default value is the first choice in choices. You can
            # specify a different initial value using the value keyword.
            value=GRADIENT_MAGNITUDE,
            #
            # Here, in the documentation, we do a little trick so that
            # we use the actual text that's displayed in the documentation.
            #
            # {GRADIENT_MAGNITUDE} will get changed into "Gradient magnitude"
            # etc. Python will look in keyword arguments for format()
            # for the "GRADIENT_" names and paste them in where it sees
            # a matching {GRADIENT_...}.
            #
            doc="""\
Choose what to calculate:

-  *{GRADIENT_MAGNITUDE}*: calculate the magnitude of the gradient at
   each pixel.
-  *{GRADIENT_DIRECTION_X}*: get the relative contribution of the
   gradient in the X direction (.5 = no contribution, 0 to .5 =
   decreasing with increasing X, .5 to 1 = increasing with increasing
   X).
-  *{GRADIENT_DIRECTION_Y}*: get the relative contribution of the
   gradient in the Y direction.
""".format(
                **{
                    "GRADIENT_MAGNITUDE": GRADIENT_MAGNITUDE,
                    "GRADIENT_DIRECTION_X": GRADIENT_DIRECTION_X,
                    "GRADIENT_DIRECTION_Y": GRADIENT_DIRECTION_Y,
                }),
        )

        #
        # A binary setting displays a checkbox.
        #
        self.automatic_smoothing = Binary(
            text="Automatically choose the smoothing scale?",
            value=True,  # The default value is to choose automatically
            doc=
            "The module will automatically choose a smoothing scale for you if you leave this checked.",
        )

        #
        # We do a little smoothing which supplies a scale to the gradient.
        #
        # We use a float setting so that the user can give us a number
        # for the scale. The control will turn red if the user types in
        # an invalid scale.
        #
        self.scale = Float(
            text="Scale",
            value=1,  # The default value is 1 - a short-range scale
            minval=0.1,  # We don't let the user type in really small values
            maxval=100,  # or large values
            doc="""\
This is a scaling factor that supplies the sigma for a gaussian that's
used to smooth the image. The gradient is calculated on the smoothed
image, so large scales will give you long-range gradients and small
scales will give you short-range gradients.
""",
        )
Code example #27
0
class MeasureTrackQuality(cpm.Module):
    """Measure tracking quality (TrAM) for objects tracked by TrackObjects.

    Computes the TrAM (Tracking Aberration Measure) statistic per tracked
    trajectory from user-selected measurements (see ``post_group``) and
    stores it, together with parent/split/label bookkeeping, as per-object
    measurements.
    """

    module_name = "MeasureTrackQuality"
    category = "Measurement"
    variable_revision_number = 1

    # Measurement category and feature names written by this module.
    CAT_MEASURE_TRACK_QUALITY = "MeasureTrackQuality"
    MEAS_TRAM = "TrAM"
    MEAS_LABELS = "Labels"
    MEAS_PARENT = "Is_Parent"
    MEAS_SPLIT = "Split_Trajectory"
    # Fully-qualified "<category>_<feature>" measurement names.
    FULL_TRAM_MEAS_NAME = "{}_{}".format(CAT_MEASURE_TRACK_QUALITY, MEAS_TRAM)
    FULL_LABELS_MEAS_NAME = "{}_{}".format(CAT_MEASURE_TRACK_QUALITY, MEAS_LABELS)
    FULL_PARENT_MEAS_NAME = "{}_{}".format(CAT_MEASURE_TRACK_QUALITY, MEAS_PARENT)
    FULL_SPLIT_MEAS_NAME = "{}_{}".format(CAT_MEASURE_TRACK_QUALITY, MEAS_SPLIT)
    IMAGE_NUM_KEY = "Image"
    # LSQ spline fitting needs at least this many knots (see compute_TrAM).
    MIN_NUM_KNOTS = 3

    # Keys used in the internal per-trajectory result dictionaries
    # (see post_group / evaluate_tram_for_split_objects).
    LABELS_KEY = "labels"
    IMAGE_NUMS_KEY = "image_nums"
    OBJECT_NUMS_KEY = "object_nums"
    PARENT_OBJECT_NUMS_KEY = "parent_object_nums"
    TRAM_KEY = "TrAM"
    SPLIT_KEY = "split"
    PARENT_KEY = "parent"

    def create_settings(self):
        """Define the module's user-visible settings.

        Creates the tracked-object chooser, the measurement multi-choice,
        the isotropic-XY toggle, the spline knot count, and the TrAM
        exponent. (Typos in the user-facing help text are fixed here:
        "indpendent" -> "independent", "will be have" -> "will have".)
        """
        # choose the tracked objects to measure TrAM on
        self.object_name = LabelSubscriber(
                "Tracked objects", "None", doc="""
            Select the tracked objects for computing TrAM.""")

        # which measurements will go into the TrAM computation
        self.tram_measurements = MeasurementMultiChoiceForCategory(
            "TrAM measurements", category_chooser=self.object_name, doc="""
            These are measurements for the selected tracked objects which
            will be used in the TrAM computation. At least one must be selected.""")

        # Treat X-Y value pairs as isotropic in the TrAM measure?
        self.isotropic = cps.Binary(
            'Isotropic XY metric?', True, doc="""
            If selected (the default) then measurements that are available
            as X-Y pairs (e.g. location) will have an isotropic
            metric applied in TrAM. Note that the X-Y-Z extension of this feature
            is not currently available.
            """)

        # number of spline knots
        self.num_knots = Integer(
            "Number of spline knots", 4, minval=self.MIN_NUM_KNOTS, doc="""
            The number of knots (independent values) used
            when computing smoothing splines. This should be around 1/5th the number
            of frames for reasonably oversampled time lapse sequences, and must be 3
            or greater. It is approximately the maximum number of wiggles expected in
            well-tracked trajectories
            """)

        # TrAM exponent
        self.tram_exponent = Float(
            "TrAM exponent", 0.5, minval=0.01, maxval=1.0, doc="""
            This number is between 0.01 and 1 (default 0.5), and specifies how
            strongly simultaneous sudden changes in multiple features synergize in
            the TrAM metric. A lower value signifies higher synergy (at the risk of
            missing tracking failures that are reflected in only some of the features).
            """)

    def settings(self):
        """Return the settings in their load/save (pipeline-file) order."""
        ordered_settings = [
            self.object_name,
            self.tram_measurements,
            self.isotropic,
            self.num_knots,
            self.tram_exponent,
        ]
        return ordered_settings

    def validate_module(self, pipeline):
        """Check that at least one TrAM measurement is selected and that the
        chosen object actually carries TrackObjects tracking data."""
        selections = self.get_selected_tram_measurements()
        if not selections:
            raise cps.ValidationError(
                    "Please select at least one TrAM measurement for tracking of {}".format(self.object_name.value),
                    self.tram_measurements)

        # the chosen object must expose TrackObjects measurement columns
        obj_name = self.object_name.value
        columns = pipeline.get_measurement_columns()
        has_tracking = any(
            entry[0] == obj_name and entry[1].startswith(trackobjects.F_PREFIX)
            for entry in columns
        )
        if not has_tracking:
            msg = "No {} data available for {}. Please select an object with tracking data.".format(trackobjects.F_PREFIX, obj_name)
            raise cps.ValidationError(msg, self.object_name)

    def run(self, workspace):
        # Intentionally a no-op: TrAM needs the whole time series, so all
        # computation is deferred to post_group() after the group finishes.
        pass

    def display_post_group(self, workspace, figure):
        """Show a histogram of the TrAM values computed for this group."""
        if not self.show_window:
            return
        figure.set_subplots((1, 1))
        figure.subplot_histogram(
            0, 0,
            workspace.display_data.tram_values,
            bins=40,
            xlabel="TrAM",
            title="TrAM for {}".format(self.object_name.value),
        )

    def post_group(self, workspace, grouping):
        """Compute TrAM for every trajectory once an image group completes.

        Collects the user-selected measurements across the group's images,
        normalizes each feature by its typical frame-to-frame deviation,
        computes one TrAM value per trajectory (complete and split
        trajectories), then stores TrAM/parent/split/label results as
        per-object measurements and caches TrAM values for the display.

        :param workspace: provides measurements and display_data
        :param grouping: dictionary identifying the image group just run
        """
        self.show_window = True

        measurements = workspace.measurements
        obj_name = self.object_name.value # the object the user has selected

        # get the image numbers
        group_number = grouping["Group_Number"]
        groupings = workspace.measurements.get_groupings(grouping)
        img_numbers = sum([numbers for group, numbers in groupings if int(group["Group_Number"]) == group_number], [])

        num_images = len(img_numbers)
        if num_images < TRAM_MIN_TIME_POINTS:
            # NOTE(review): this only warns and continues; short sequences
            # later yield None/NaN TrAM values rather than aborting.
            logger.warning("Need at least {} time points to compute TrAM. Found {}."
                           .format(TRAM_MIN_TIME_POINTS, num_images))

        # get vector of tracking label for each data point
        feature_names = measurements.get_feature_names(obj_name)
        tracking_label_feature_name = [name for name in feature_names
                                       if name.startswith("{}_{}".format(trackobjects.F_PREFIX, trackobjects.F_LABEL))][0]
        label_vals = measurements.get_measurement(obj_name, tracking_label_feature_name, img_numbers)
        label_vals_flattened_all = numpy.concatenate(label_vals).ravel().tolist()
        # determine which indexes we should keep. Get rid of any nan label values
        not_nan_indices = [i for i, label in enumerate(label_vals_flattened_all) if not numpy.isnan(label)]
        label_vals_flattened = [label_vals_flattened_all[i] for i in not_nan_indices] # excludes nan

        # convenience function to flatten and remove values corresponding to nan labels
        def extract_flattened_measurements_for_valid_labels(lol):
            return [numpy.concatenate(lol).tolist()[i] for i in not_nan_indices]

        # function to get a tuple dictionary entry relating feature name with data values
        # NOTE(review): this fetches values over measurements.get_image_numbers()
        # (all images) while the label/lifetime fetches elsewhere use
        # img_numbers (this group only). The shared not_nan_indices filter
        # assumes both layouts match, which holds only when the group spans
        # every image — TODO confirm for multi-group pipelines.
        def get_feature_values_tuple(sel):
            feat_obj_name, feat_name = sel.split("|")
            vals = measurements.get_measurement(feat_obj_name, feat_name, measurements.get_image_numbers())
            vals_flattened = extract_flattened_measurements_for_valid_labels(vals)
            return (feat_name, vals_flattened)

        # get all the data for TrAM
        selections = self.get_selected_tram_measurements() # measurements that the user wants to run TrAM on
        all_values_dict = dict(get_feature_values_tuple(sel) for sel in selections)
        # determine if there are any potential isotropic (XY) pairs
        if self.isotropic.value:
            isotropic_pairs = MeasureTrackQuality.Determine_Isotropic_pairs(all_values_dict.keys())
        else:
            isotropic_pairs = []

        # sanity check: make sure all vectors have the same length
        vec_lengths = set([len(value) for value in all_values_dict.values()])
        assert len(vec_lengths) == 1, "Measurement vectors have differing lengths"

        # get vector of image numbers into the dict
        counts = [len([v for v in x if not numpy.isnan(v)]) for x in label_vals] # number of non-nan labels at each time point
        image_vals = [[image for _ in range(count)] for image, count in zip(img_numbers, counts)] # repeat image number
        image_vals_flattened = sum(image_vals, [])

        # determine max lifetime by label so we can select different object behaviors
        lifetime_feature_name = [name for name in feature_names
                                 if name.startswith("{}_{}".format(trackobjects.F_PREFIX, trackobjects.F_LIFETIME))][0]
        lifetime_vals_flattened =\
            extract_flattened_measurements_for_valid_labels(measurements.get_measurement(obj_name,
                                                                                         lifetime_feature_name,
                                                                                         img_numbers))
        # NOTE(review): itertools.groupby only groups *consecutive* equal
        # keys; this assumes all entries for a label are adjacent after
        # flattening — TODO confirm, otherwise later runs of the same label
        # silently overwrite earlier (label, max-lifetime) dict entries.
        max_lifetime_by_label = dict(max(lifetimes)
                                     for label, lifetimes
                                     in itertools.groupby(zip(label_vals_flattened, lifetime_vals_flattened),
                                                          lambda x: x[0]))


        # Labels for objects that are tracked the whole time.
        label_counts = Counter(label_vals_flattened) # dict with count of each label
        labels_for_complete_trajectories = [label for label in max_lifetime_by_label.keys()
                                            if max_lifetime_by_label[label] == num_images
                                            and label_counts[label] == num_images]
        # labels for objects there the whole time but result from splitting
        labels_for_split_trajectories = [label for label in max_lifetime_by_label.keys()
                                         if max_lifetime_by_label[label] == num_images
                                         and label_counts[label] > num_images
                                         and not numpy.isnan(label)]


        # create dictionary to translate from label to object number in last frame. This is how we will store results.
        object_nums = measurements.get_measurement(obj_name, M_NUMBER_OBJECT_NUMBER, img_numbers) # list of lists
        object_nums_flattened = extract_flattened_measurements_for_valid_labels(object_nums)
        object_count_by_image = {img_num:len(v) for img_num, v in zip(img_numbers, object_nums)}

        # create a mapping from object number in an image to its index in the data array for later
        index_by_img_and_object = {(img_num, obj_num): index for img_num, obj_nums in zip(img_numbers, object_nums)
                                   for index, obj_num in enumerate(obj_nums)}

        # now restrict vectors only to labels of complete trajectories
        complete_trajectory_indices = [i for i, label in enumerate(label_vals_flattened) if label in labels_for_complete_trajectories]
        all_values_dict_complete_trajectories = {k : [v[i] for i in complete_trajectory_indices] for k, v in all_values_dict.items()}

        # compute typical inter-timepoint variation for complete trajectories only.
        label_vals_flattened_complete_trajectories = [label_vals_flattened[i] for i in complete_trajectory_indices]
        image_vals_flattened_complete_trajectories = [image_vals_flattened[i] for i in complete_trajectory_indices]
        tad = MeasureTrackQuality.compute_typical_deviations(all_values_dict_complete_trajectories,
                                                               label_vals_flattened_complete_trajectories,
                                                               image_vals_flattened_complete_trajectories)

        # put all the data into a 2D array and normalize by typical deviations
        all_data_array = numpy.column_stack(all_values_dict.values())
        # NOTE(review): under Python 3 this is a dict view, and compute_TrAM
        # indexes it positionally — may need list() here; TODO confirm the
        # target Python version.
        tram_feature_names = all_values_dict_complete_trajectories.keys()
        inv_devs = numpy.diag([1 / tad[k] for k in tram_feature_names]) # diagonal matrix of inverse typical deviation
        normalized_all_data_array = numpy.dot(all_data_array, inv_devs) # perform the multiplication

        # this is how we identify our TrAM measurements to objects
        next_available_tram_label = 0

        # Compute TrAM for each complete trajectory. Store result in tram_dict using TrAM label as key.
        tram_dict = dict()
        for label in labels_for_complete_trajectories:
            indices = [i for i, lab in enumerate(label_vals_flattened) if lab == label]

            if len(indices) < TRAM_MIN_TIME_POINTS: # not enough data points
                tram = None
            else:
                tram = MeasureTrackQuality.compute_TrAM(tram_feature_names, normalized_all_data_array,
                                                        image_vals_flattened, indices, self.num_knots.get_value(),
                                                        self.tram_exponent.get_value(), isotropic_pairs)

            obj_nums = {image_vals_flattened[i] : object_nums_flattened[i] for i in indices} # pairs of image and object
            tram_dict.update({next_available_tram_label : {self.TRAM_KEY : tram, self.OBJECT_NUMS_KEY : obj_nums, self.SPLIT_KEY : 0}})
            next_available_tram_label += 1

        # now compute TrAM for split trajectories
        tracking_info_dict = dict()
        tracking_info_dict[self.LABELS_KEY] = label_vals_flattened
        tracking_info_dict[self.IMAGE_NUMS_KEY] = image_vals_flattened
        tracking_info_dict[self.OBJECT_NUMS_KEY] = object_nums_flattened

        parent_object_text_start = "{}_{}".format(trackobjects.F_PREFIX, trackobjects.F_PARENT_OBJECT_NUMBER)
        parent_object_feature = next(feature_name for feature_name in feature_names
                                     if feature_name.startswith(parent_object_text_start))
        tracking_info_dict[self.PARENT_OBJECT_NUMS_KEY] = \
            extract_flattened_measurements_for_valid_labels(measurements.get_measurement(obj_name,
                                                                                         parent_object_feature,
                                                                                         img_numbers))

        split_trajectories_tram_dict = \
            self.evaluate_tram_for_split_objects(labels_for_split_trajectories, tram_feature_names,
                                                 isotropic_pairs, normalized_all_data_array,
                                                 tracking_info_dict, next_available_tram_label)
        tram_dict.update(split_trajectories_tram_dict) # store them with the others

        # safe lookup for result slots that were never populated (None)
        def get_element_or_default_for_None(x, index, default):
            if x is None:
                return default
            else:
                return x[index]

        results_to_store_by_img = {img_num: [None for _ in range(object_count_by_image[img_num])]
                                   for img_num in img_numbers} # Seems excessive. there must be a better way.

        # cycle through each tram computed
        for tram_label, traj_dict in tram_dict.items():
            tram = traj_dict[self.TRAM_KEY]
            split_flag = traj_dict[self.SPLIT_KEY]
            for img_num, object_num in traj_dict[self.OBJECT_NUMS_KEY].items(): # every object across images for this tram
                index = index_by_img_and_object[(img_num, object_num)]
                result_dict = results_to_store_by_img[img_num][index]

                if result_dict is None:
                    result_dict = dict() # initialize
                    results_to_store_by_img[img_num][index] = result_dict # store it
                    result_dict.update({self.PARENT_KEY:0})
                    result_dict.update({self.TRAM_KEY:tram})
                    result_dict.update({self.LABELS_KEY:[tram_label]})
                else: # if there is already a TRAM_KEY then we are a parent and don't have a valid TrAM
                    result_dict.update({self.PARENT_KEY:1})
                    result_dict.update({self.TRAM_KEY:None})
                    previous_list = result_dict[self.LABELS_KEY]
                    previous_list.append(tram_label)

                result_dict.update({self.SPLIT_KEY: split_flag})

        # Loop over all images and save out
        tram_values_to_save = list()
        parent_values_to_save = list()
        split_values_to_save = list()
        label_values_to_save = list()

        for img_num, vec in results_to_store_by_img.items():
            tram_values_to_save.append([get_element_or_default_for_None(v, self.TRAM_KEY, None) for v in vec])
            parent_values_to_save.append([get_element_or_default_for_None(v, self.PARENT_KEY, None) for v in vec])
            split_values_to_save.append([get_element_or_default_for_None(v, self.SPLIT_KEY, None) for v in vec])
            label_values_to_save.append([get_element_or_default_for_None(v, self.LABELS_KEY, None) for v in vec])

        # relies on dicts preserving insertion order so keys() here aligns
        # with the per-image lists accumulated in the items() loop above
        img_nums = results_to_store_by_img.keys()
        workspace.measurements.add_measurement(obj_name, self.FULL_TRAM_MEAS_NAME, tram_values_to_save, image_set_number=img_nums)
        workspace.measurements.add_measurement(obj_name, self.FULL_PARENT_MEAS_NAME, parent_values_to_save, image_set_number=img_nums)
        workspace.measurements.add_measurement(obj_name, self.FULL_SPLIT_MEAS_NAME, split_values_to_save, image_set_number=img_nums)
        workspace.measurements.add_measurement(obj_name, self.FULL_LABELS_MEAS_NAME, label_values_to_save, image_set_number=img_nums)

        # store the existing TrAM values for the histogram display
        workspace.display_data.tram_values = [d.get(self.TRAM_KEY)
                                              for d in tram_dict.values() if d.get(self.TRAM_KEY) is not None]

    @staticmethod
    def compute_TrAM(tram_feature_names, normalized_data_array, image_vals_flattened, indices,
                     num_knots, tram_exponent, isotropic_pairs):
        """
        Compute the TrAM statistic for a single trajectory

        :param tram_feature_names: Names of the features to use (in order of the columns in normalized_data_array).
        May be any iterable, including a dict view.
        :param normalized_data_array: Source of data (normalized to typical absolute deviations). Columns correspond
        to TrAM features, and rows are for all objects across images
        :param image_vals_flattened: The image numbers corresponding to rows in normalized_data_array
        :param indices: The rows in normalized_data_array which are for this trajectory
        :param num_knots: Number of knots in the smoothing spline
        :param tram_exponent: TrAM exponent used to combine aberrations
        :param isotropic_pairs: List of XY-pairs of features which should be treated with a Euclidian metric
        :return: The computed TrAM value
        """
        # Callers pass dict.keys() here (see post_group); dict views cannot
        # be indexed positionally under Python 3, so materialize as a list.
        tram_feature_names = list(tram_feature_names)

        normalized_data_for_label = normalized_data_array[indices,:]  # get the corresponding data
        images = [image_vals_flattened[i] for i in indices]

        normalized_data_for_label = normalized_data_for_label[numpy.argsort(images),]  # order by image
        normalized_values_dict = {tram_feature_names[i]: normalized_data_for_label[:, i] for i in range(0, len(tram_feature_names))}

        def compute_single_aberration(normalized_values):
            """
            Figure out the deviation from smooth at each time point
            :param normalized_values: time series of values, normalized to the typical deviation
            :return: list of absolute deviation values from smooth
            """
            n = len(normalized_values)
            xs = numpy.array(range(1, n + 1), float)
            # interior knots spaced evenly across the series
            knot_deltas = (n-1.0)/(num_knots+1.0)
            knot_locs = 1 + numpy.array(range(1, num_knots)) * knot_deltas

            try:
                interp_func = scipy.interpolate.LSQUnivariateSpline(xs, normalized_values, knot_locs)
                smoothed_vals = interp_func(xs)
            except ValueError:
                # spline fit failed (e.g. too few points for the knots)
                smoothed_vals = numpy.zeros(len(xs)) + numpy.nan # return nan array

            return abs(normalized_values - smoothed_vals)

        # compute aberrations for each of the features
        aberration_dict = {feat_name : compute_single_aberration(numpy.array(values))
                           for feat_name, values in normalized_values_dict.items()}

        # now combine them with the appropriate power
        aberration_array = numpy.column_stack(list(aberration_dict.values()))

        # handle Euclidian weightings
        num_isotropic = len(isotropic_pairs)
        if num_isotropic != 0:
            # materialized list so it can be scanned repeatedly below
            column_names = list(aberration_dict.keys())
            remaining_features = list(column_names)

            column_list = list() # we will accumulate data here
            weight_list = list() # will accumulate weights here

            for x, y in isotropic_pairs:
                # find data columns
                x_col = next(i for i, val in enumerate(column_names) if x == val)
                y_col = next(i for i, val in enumerate(column_names) if y == val)

                # combine the X and Y aberrations with a Euclidian metric
                isotropic_vec = numpy.sqrt(numpy.apply_along_axis(numpy.mean, 1, aberration_array[:, (x_col, y_col)]))
                column_list.append(isotropic_vec)
                weight_list.append(2) # 2 data elements used to weight is twice the usual

                # remove the column names from remaining features
                remaining_features.remove(x)
                remaining_features.remove(y)

            # all remaining features have weight 1
            for feature_name in remaining_features:
                col = next(i for i, val in enumerate(column_names) if val == feature_name)
                column_list.append(aberration_array[:,col])
                weight_list.append(1)

            data_array = numpy.column_stack(column_list) # make array
            weight_array = numpy.array(weight_list, float)
            weight_array = weight_array / numpy.sum(weight_array) # normalize weights
            weight_matrix = numpy.diag(weight_array)

            pwr = numpy.power(data_array, tram_exponent)
            weighted_means = numpy.apply_along_axis(numpy.sum, 1, numpy.matmul(pwr, weight_matrix))
            tram = numpy.max(numpy.power(weighted_means, 1.0 / tram_exponent))
        else:
            pwr = numpy.power(aberration_array, tram_exponent)
            means = numpy.apply_along_axis(numpy.mean, 1, pwr)
            tram = numpy.max(numpy.power(means, 1.0 / tram_exponent))

        return tram

    def evaluate_tram_for_split_objects(self, labels_for_split_trajectories, tram_feature_names, isotropic_pairs,
                                        normalized_data_array, tracking_info_dict, next_available_tram_label):
        """
        Compute TrAM results for objects that have split trajectories.

        Each object in the last frame of a split label is traced backward
        through its parent objects to the first frame, and TrAM is computed
        on that reconstructed per-object path.

        :param labels_for_split_trajectories: TrackObjects labels for trajectories that split.
        :param tram_feature_names:  The feature names that are used to compute TrAM.
        :param isotropic_pairs: List of feature pairs (XY) to be Euclidianized.
        :param normalized_data_array: Data for the TrAM features, normalized by typical absolute deviation.
        :param tracking_info_dict: Dictionary of other relevant information about the objects
        (keys LABELS_KEY, IMAGE_NUMS_KEY, OBJECT_NUMS_KEY, PARENT_OBJECT_NUMS_KEY).
        :param next_available_tram_label: Tram label number. We increment this as we use it
        (locally only; the caller's variable is unaffected).
        :return: Dictionary whose keys are TrAM labels and values are dictionaries containing values
        for the keys TRAM_KEY, OBJECT_NUMS_KEY, SPLIT_KEY
        """

        label_vals_flattened = tracking_info_dict[self.LABELS_KEY]
        image_vals_flattened = tracking_info_dict[self.IMAGE_NUMS_KEY]
        object_nums_flattened = tracking_info_dict[self.OBJECT_NUMS_KEY]
        parent_object_nums_flattened = tracking_info_dict[self.PARENT_OBJECT_NUMS_KEY]

        first_image_num = min(image_vals_flattened)
        last_image_num = max(image_vals_flattened)

        # Make a map from (image,object_number) to flattened array index so we can find parents
        img_obj_to_index = dict([((image_vals_flattened[i], object_nums_flattened[i]), i)
                                 for i in range(0, len(image_vals_flattened))])

        # Make a map from label to object number(s) for the last image. We will work backward from these
        object_nums_for_label_last_image = defaultdict(list) # need to store lists because there can be multiple
        # Restrict to labels for split trajectories and only last image
        for label, object_num, image_num in zip(label_vals_flattened, object_nums_flattened, image_vals_flattened):
            if image_num == last_image_num and label in labels_for_split_trajectories:
                object_nums_for_label_last_image[label].append(object_num)

        # Compute TrAM for each label of split objects. They will all have
        # a complete set of predecessor objects going from the end to the start since
        # they were filtered to have a max lifetime equal to the number of frames.
        # Here we piece together the entire trajectory for each object and compute TrAM.
        # construct the object trajectory in terms of array indexes. These get placed
        # in an accumulator (list) that should be initialized as empty.
        # Recursion depth equals the number of frames; appends happen after
        # the recursive call so the accumulators end up in time order.
        def get_parent_indices(image_num, object_num, index_accum, object_num_accum):
            if image_num < first_image_num: return

            index = img_obj_to_index[(image_num, object_num)]
            parent_object_num = parent_object_nums_flattened[index]
            get_parent_indices(image_num - 1, parent_object_num, index_accum, object_num_accum) # recurse for all earlier

            index_accum.append(index)
            object_num_accum.append(object_num)

        # cycle through everything in our dict and compute tram. Store.
        result = dict()
        for label in object_nums_for_label_last_image.keys():
            for object_num_last_image in object_nums_for_label_last_image.get(label): # this is a list
                indices_list = list()
                object_nums_list = list()
                get_parent_indices(last_image_num, object_num_last_image, indices_list, object_nums_list)

                # Indices now contains the indices for the tracked object across images
                tram = MeasureTrackQuality.compute_TrAM(tram_feature_names, normalized_data_array, image_vals_flattened,
                                                        indices_list, self.num_knots.get_value(),
                                                        self.tram_exponent.get_value(), isotropic_pairs)

                # for each image number, the corresponding object number
                obj_nums = dict(zip([image_vals_flattened[i] for i in indices_list], object_nums_list))

                result.update({next_available_tram_label: {self.TRAM_KEY:tram, self.OBJECT_NUMS_KEY:obj_nums,
                                                           self.SPLIT_KEY:1}})
                next_available_tram_label += 1

        return result

    @staticmethod
    def compute_typical_deviations(values_dict, labels_vec, image_vec):
        """
        Compute the median absolute temporal difference in each of the features across all tracks.

        :param values_dict: keys are feature names, values are lists of data values across images and tracks
        :param labels_vec: A list of track labels corresponding to data values in their arrays
        :param image_vec: A list of image numbers corresponding to data values in their arrays
        :return: dictionary whose keys are feature names and values are median absolute differences
        """
        # Map each distinct track label to the flat indices belonging to it.
        indices_by_label = {
            label: [i for i, lab in enumerate(labels_vec) if lab == label]
            for label in set(labels_vec)
        }

        def median_abs_step(series_list):
            # median of |x_{t+1} - x_t| pooled over every trajectory
            diffs = [numpy.abs(numpy.diff(series)) for series in series_list]
            return numpy.median(numpy.concatenate(diffs).ravel())

        result = dict()
        for feat_name, values in values_dict.items():
            # build each track's time-ordered series for this feature
            ordered_series = []
            for indices in indices_by_label.values():
                images = [image_vec[i] for i in indices]
                data = [values[i] for i in indices]
                time_ordered = sorted(zip(images, data))
                ordered_series.append([d for _, d in time_ordered])
            result[feat_name] = median_abs_step(ordered_series)

        return result

    @staticmethod
    def Determine_Isotropic_pairs(features):
        """
        Look for any pairs that end in "_X" and "_Y" or have "_X_" and "_Y_" within them
        :param features:list of names 
        :return: list of tubples containing pairs of names which can be paired using an isotropic (Euclidian) metric
        """

        # first find all the ones with a "_X$"
        features_X_1 = [feature for feature in features if re.search("_X$", feature)]
        features_X_2 = [feature for feature in features if re.search("_X_", feature)]

        # get corresponding pairs
        paired_1 = [(feature, re.sub("_X$", "_Y", feature)) for feature in features_X_1]
        paired_2 = [(feature, re.sub("_X_", "_Y_", feature)) for feature in features_X_2]

        pairs = paired_1 + paired_2

        # only return pairs where the Y feature exists
        return [(x, y) for x, y in pairs if y in features]

    # Get the selected measurements, restricted to those which start with the object name
    def get_selected_tram_measurements(self):
        # get what was selected by the user
        selections = self.tram_measurements.get_selections()

        # get the object set to work on
        object_name = self.object_name.value

        return [sel for sel in selections if sel.startswith(object_name)]

    def get_measurement_columns(self, pipeline):
        """Declare the per-object measurement columns this module produces."""
        obj_name = self.object_name.value
        # the first three features are numeric; the labels feature is a blob
        float_features = [
            self.FULL_TRAM_MEAS_NAME,
            self.FULL_PARENT_MEAS_NAME,
            self.FULL_SPLIT_MEAS_NAME,
        ]
        columns = [(obj_name, feature, COLTYPE_FLOAT) for feature in float_features]
        columns.append((obj_name, self.FULL_LABELS_MEAS_NAME, COLTYPE_BLOB))
        return columns

    def get_categories(self, pipeline, object_name):
        if object_name == self.object_name.value:
            return [self.CAT_MEASURE_TRACK_QUALITY]
        return []

    def get_measurements(self, pipeline, object_name, category):
        if object_name == self.object_name.value and category == self.CAT_MEASURE_TRACK_QUALITY:
            return [self.MEAS_TRAM, self.MEAS_PARENT, self.MEAS_SPLIT, self.MEAS_LABELS]
        return []

    def is_aggregation_module(self):
        """Signal that this module aggregates data across an image group.

        Aggregation modules need access to every image set in a group and
        typically do their real work on the final image set or in
        ``post_group`` (as TrackObjects, MakeProjection and
        CorrectIllumination_Calculate do). This module always aggregates.
        """
        return True
コード例 #28
0
    def create_settings(self):
        """Build this module's settings on top of the superclass's set.

        Adds the size threshold, the optional contact-area threshold
        (absolute or relative), and merge-behavior toggles used by the
        merging logic elsewhere in the module.
        """
        super(MergeObjects, self).create_settings()

        # Diameter threshold below which an object becomes a merge candidate.
        self.size = Float(
            text="Minimum object size",
            value=64.,
            doc=
            "Objects smaller than this diameter will be merged with their most significant neighbor."
        )

        # Whether to additionally require a minimum contact area for a merge.
        self.use_contact_area = cellprofiler_core.setting.Binary(
            text="Set minimum contact area threshold",
            value=False,
            doc=
            "Use this setting for setting a minimum contact area value (either relative or absolute)"
        )

        # Selects between the absolute (pixel count) and relative (fraction
        # of surface area) contact-area thresholds below.
        self.contact_area_method = Choice(
            text="Minimum contact area method",
            choices=[A_ABSOLUTE, A_RELATIVE],
            value=A_ABSOLUTE,
            # TODO: This
            # NOTE(review): doc text is empty — needs user-facing help.
            doc="""""")

        # Absolute threshold: minimum number of bordering pixels with the
        # largest neighbor before a merge is allowed.
        self.abs_neighbor_size = cellprofiler_core.setting.text.Integer(
            text="Absolute minimum contact area",
            value=0,
            doc="""
When considering to merge an object, the largest neighbor must have at 
least this many bordering pixels in order to have the current object 
merge into it.

The default of 0 means no minimum is required.""")

        # Relative threshold: minimum fraction (0..1) of surface area in
        # contact with the largest neighbor before a merge is allowed.
        self.rel_neighbor_size = cellprofiler_core.setting.text.Float(
            text="Relative minimum contact area",
            value=0,
            minval=0,
            maxval=1,
            doc="""
When considering to merge an object, the largest neighbor must have at 
least percentage of its surface area contacting the object in order for the 
current object to merge into it.

The default of 0 means no minimum is required.""")

        # If enabled, the merge runs per z-plane instead of on the volume.
        self.plane_wise = cellprofiler_core.setting.Binary(
            text="Plane wise merge",
            value=False,
            doc="""\
Select "*{YES}*" to merge objects on a per-plane level. 
This will perform the "significant neighbor" merge on 
each plane of a volumetric image, rather than on the 
image as a whole. This may be helpful for removing seed
artifacts that are the result of segmentation.
**Note**: Plane-wise operations will be considerably slower.
""".format(**{"YES": "Yes"}))

        # NOTE(review): the setting text says "Remove objects below size
        # threshold" but the doc text says such objects "will not be
        # removed" — one of the two reads inverted; confirm intended
        # behavior against the module's run() logic.
        self.remove_below_threshold = cellprofiler_core.setting.Binary(
            text="Remove objects below size threshold",
            value=False,
            doc="""\
Select "*{YES}*" to ensure that objects below the minimum size
threshold with no larger significant neighbor will not be 
removed. Objects below the threshold with no neighbors are kept
by default.
""".format(**{"YES": "Yes"}))
コード例 #29
0
ファイル: smooth.py プロジェクト: yaweiyang-sz/CellProfiler
    def create_settings(self):
        """Create the Smooth module's settings.

        Defines the input/output image names, the smoothing-method choice,
        and the method-specific parameters (artifact diameter, edge
        intensity difference, intensity clipping).
        """
        # Image to be smoothed.
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="""Select the image to be smoothed.""",
        )

        # Name under which the smoothed image is published.
        self.filtered_image_name = ImageName(
            "Name the output image",
            "FilteredImage",
            doc="""Enter a name for the resulting image.""",
        )

        # The %(NAME)s placeholders in the doc are filled from module globals.
        self.smoothing_method = Choice(
            "Select smoothing method",
            [
                FIT_POLYNOMIAL,
                GAUSSIAN_FILTER,
                MEDIAN_FILTER,
                SMOOTH_KEEPING_EDGES,
                CIRCULAR_AVERAGE_FILTER,
                SM_TO_AVERAGE,
            ],
            doc="""\
This module smooths images using one of several filters. Fitting a
polynomial is fastest but does not allow a very tight fit compared to
the other methods:

-  *%(FIT_POLYNOMIAL)s:* This method is fastest but does not allow
   a very tight “fit” compared to the other methods. Thus, it will usually be less
   accurate. The method treats the intensity of the image
   pixels as a polynomial function of the x and y position of each
   pixel. It fits the intensity to the polynomial, *A x* :sup:`2` *+ B
   y* :sup:`2` *+ C xy + D x + E y + F*. This will produce a smoothed
   image with a single peak or trough of intensity that tapers off
   elsewhere in the image. For many microscopy images (where the
   illumination of the lamp is brightest in the center of field of
   view), this method will produce an image with a bright central region
   and dimmer edges. But, in some cases the peak/trough of the
   polynomial may actually occur outside of the image itself.
-  *%(GAUSSIAN_FILTER)s:* This method convolves the image with a
   Gaussian whose full width at half maximum is the artifact diameter
   entered. Its effect is to blur and obscure features smaller than the
   specified diameter and spread bright or dim features larger than the
   specified diameter.
-  *%(MEDIAN_FILTER)s:* This method finds the median pixel value within
   the diameter you specify. It removes bright or dim features
   that are significantly smaller than the specified diameter.
-  *%(SMOOTH_KEEPING_EDGES)s:* This method uses a bilateral filter
   which limits Gaussian smoothing across an edge while applying
   smoothing perpendicular to an edge. The effect is to respect edges in
   an image while smoothing other features. *%(SMOOTH_KEEPING_EDGES)s*
   will filter an image with reasonable speed for artifact diameters
   greater than 10 and for intensity differences greater than 0.1. The
   algorithm will consume more memory and operate more slowly as you
   lower these numbers.
-  *%(CIRCULAR_AVERAGE_FILTER)s:* This method convolves the image with
   a uniform circular averaging filter whose size is the artifact
   diameter entered. This filter is useful for re-creating an
   out-of-focus blur to an image.
-  *%(SM_TO_AVERAGE)s:* Creates a flat, smooth image where every pixel
   of the image equals the average value of the original image.

*Note, when deciding between %(MEDIAN_FILTER)s and %(GAUSSIAN_FILTER)s
we typically recommend
%(MEDIAN_FILTER)s over %(GAUSSIAN_FILTER)s because the
median is less sensitive to outliers, although the results are also
slightly less smooth and the fact that images are in the range of 0
to 1 means that outliers typically will not dominate too strongly
anyway.*
"""
            % globals(),
        )

        # When True, the artifact diameter is derived from the image size
        # instead of taken from self.object_size.
        self.wants_automatic_object_size = Binary(
            "Calculate artifact diameter automatically?",
            True,
            doc="""\
*(Used only if “%(GAUSSIAN_FILTER)s”, “%(MEDIAN_FILTER)s”, “%(SMOOTH_KEEPING_EDGES)s” or “%(CIRCULAR_AVERAGE_FILTER)s” is selected)*

Select *Yes* to choose an artifact diameter based on the size of
the image. The minimum size it will choose is 30 pixels, otherwise the
size is 1/40 of the size of the image.

Select *No* to manually enter an artifact diameter.
"""
            % globals(),
        )

        # Manual artifact diameter (pixels); used when the automatic option is off.
        self.object_size = Float(
            "Typical artifact diameter",
            16.0,
            doc="""\
*(Used only if choosing the artifact diameter automatically is set to
“No”)*

Enter the approximate diameter (in pixels) of the features to be blurred
by the smoothing algorithm. This value is used to calculate the size of
the spatial filter. {} For most
smoothing methods, selecting a diameter over ~50 will take substantial
amounts of time to process.
""".format(
                HELP_ON_MEASURING_DISTANCES
            ),
        )

        # Bilateral-filter edge threshold; only used with SMOOTH_KEEPING_EDGES.
        self.sigma_range = Float(
            "Edge intensity difference",
            0.1,
            doc="""\
*(Used only if “{smooth_help}” is selected)*

Enter the intensity step (which indicates an edge in an image) that you
want to preserve. Edges are locations where the intensity changes
precipitously, so this setting is used to adjust the rough magnitude of
these changes. A lower number will preserve weaker edges. A higher
number will preserve only stronger edges. Values should be between zero
and one. {pixel_help}
""".format(
                smooth_help=SMOOTH_KEEPING_EDGES, pixel_help=HELP_ON_PIXEL_INTENSITIES
            ),
        )

        # Whether to clamp FIT_POLYNOMIAL output into the 0-1 range.
        self.clip = Binary(
            "Clip intensities to 0 and 1?",
            True,
            doc="""\
*(Used only if "{fit}" is selected)*

The *{fit}* method is the only smoothing option that can
yield an output image whose values are outside of the values of the
input image. This setting controls whether to limit the image
intensity to the 0 - 1 range used by CellProfiler.

Select *Yes* to set all output image pixels less than zero to zero
and all pixels greater than one to one.

Select *No* to allow values less than zero and greater than one in
the output image.
""".format(
                fit=FIT_POLYNOMIAL
            ),
        )
コード例 #30
0
    def create_settings(self):
        """Create the CalculateMath module's settings.

        Builds two Operand setting groups (the inputs to the arithmetic
        operation) plus the output name, the operation choice, and the
        post-processing options (log, multiply, exponent, addend, bounds,
        rounding).
        """
        # XXX needs to use cps.SettingsGroup
        class Operand(object):
            """Represents the collection of settings needed by each operand"""
            def __init__(self, index, operation):
                # index: 0 for the first operand, 1 for the second.
                # operation: one of the O_* constants; used only for naming.
                self.__index = index
                self.__operation = operation
                self.__operand_choice = Choice(
                    self.operand_choice_text(),
                    MC_ALL,
                    doc=
                    """Indicate whether the operand is an image or object measurement.""",
                )

                self.__operand_objects = LabelName(
                    self.operand_objects_text(),
                    "None",
                    doc=
                    """Choose the objects you want to measure for this operation.""",
                )

                self.__operand_measurement = Measurement(
                    self.operand_measurement_text(),
                    self.object_fn,
                    doc="""\
Enter the category that was used to create the measurement. You
will be prompted to add additional information depending on
the type of measurement that is requested.""",
                )

                self.__multiplicand = Float(
                    "Multiply the above operand by",
                    1,
                    doc=
                    """Enter the number by which you would like to multiply the above operand.""",
                )

                self.__exponent = Float(
                    "Raise the power of above operand by",
                    1,
                    doc=
                    """Enter the power by which you would like to raise the above operand.""",
                )

            @property
            def operand_choice(self):
                """Either MC_IMAGE for image measurements or MC_OBJECT for object"""
                return self.__operand_choice

            @property
            def operand_objects(self):
                """Get measurements from these objects"""
                return self.__operand_objects

            @property
            def operand_measurement(self):
                """The measurement providing the value of the operand"""
                return self.__operand_measurement

            @property
            def multiplicand(self):
                """Premultiply the measurement by this value"""
                return self.__multiplicand

            @property
            def exponent(self):
                """Raise the measurement to this power"""
                return self.__exponent

            @property
            def object(self):
                """The name of the object for measurement or "Image\""""
                if self.operand_choice == MC_IMAGE:
                    return IMAGE
                else:
                    return self.operand_objects.value

            def object_fn(self):
                """Resolve the object name for the Measurement setting's callback."""
                if self.__operand_choice == MC_IMAGE:
                    return IMAGE
                elif self.__operand_choice == MC_OBJECT:
                    return self.__operand_objects.value
                else:
                    raise NotImplementedError(
                        "Measurement type %s is not supported" %
                        self.__operand_choice.value)

            def operand_name(self):
                """A fancy name based on what operation is being performed"""
                # e.g. "minuend"/"subtrahend" for subtraction,
                # "numerator"/"denominator" for division.
                if self.__index == 0:
                    return ("first operand" if self.__operation
                            in (O_ADD, O_MULTIPLY) else "minuend"
                            if self.__operation == O_SUBTRACT else "numerator")
                elif self.__index == 1:
                    return ("second operand" if self.__operation
                            in (O_ADD, O_MULTIPLY) else "subtrahend" if
                            self.__operation == O_SUBTRACT else "denominator")

            def operand_choice_text(self):
                """Label for the measurement-type chooser."""
                return self.operand_text("Select the %s measurement type")

            def operand_objects_text(self):
                """Label for the objects chooser."""
                return self.operand_text("Select the %s objects")

            def operand_text(self, format):
                """Fill an operand label template with this operand's fancy name."""
                return format % self.operand_name()

            def operand_measurement_text(self):
                """Label for the measurement chooser."""
                return self.operand_text("Select the %s measurement")

            def settings(self):
                """The operand settings to be saved in the output file"""
                return [
                    self.operand_choice,
                    self.operand_objects,
                    self.operand_measurement,
                    self.multiplicand,
                    self.exponent,
                ]

            def visible_settings(self):
                """The operand settings to be displayed"""
                # refresh labels in case the operation (and hence the fancy
                # operand name) changed since construction
                self.operand_choice.text = self.operand_choice_text()
                self.operand_objects.text = self.operand_objects_text()
                self.operand_measurement.text = self.operand_measurement_text()
                result = [self.operand_choice]
                # the objects chooser only applies to object measurements
                result += ([self.operand_objects]
                           if self.operand_choice == MC_OBJECT else [])
                result += [
                    self.operand_measurement, self.multiplicand, self.exponent
                ]
                return result

        # Name under which the computed value is stored.
        self.output_feature_name = Alphanumeric(
            "Name the output measurement",
            "Measurement",
            doc=
            """Enter a name for the measurement calculated by this module.""",
        )

        self.operation = Choice(
            "Operation",
            O_ALL,
            doc="""\
Choose the arithmetic operation you would like to perform. *None* is
useful if you simply want to select some of the later options in the
module, such as multiplying or exponentiating your image by a constant.
""",
        )

        # The two inputs to the arithmetic operation.
        self.operands = (Operand(0,
                                 self.operation), Operand(1, self.operation))

        self.spacer_1 = Divider(line=True)

        self.spacer_2 = Divider(line=True)

        self.spacer_3 = Divider(line=True)

        # NOTE(review): the doc string below has no %(...)s placeholders, so
        # the "% globals()" is a no-op (same for the two constrain_* settings).
        self.wants_log = Binary(
            "Take log10 of result?",
            False,
            doc="""Select *Yes* if you want the log (base 10) of the result."""
            % globals(),
        )

        self.final_multiplicand = Float(
            "Multiply the result by",
            1,
            doc="""\
*(Used only for operations other than "None")*

Enter the number by which you would like to multiply the result.
""",
        )

        self.final_exponent = Float(
            "Raise the power of result by",
            1,
            doc="""\
*(Used only for operations other than "None")*

Enter the power by which you would like to raise the result.
""",
        )

        self.final_addend = Float(
            "Add to the result",
            0,
            doc="""Enter the number you would like to add to the result.""",
        )

        self.constrain_lower_bound = Binary(
            "Constrain the result to a lower bound?",
            False,
            doc=
            """Select *Yes* if you want the result to be constrained to a lower bound."""
            % globals(),
        )

        self.lower_bound = Float(
            "Enter the lower bound",
            0,
            doc="""Enter the lower bound of the result here.""",
        )

        self.constrain_upper_bound = Binary(
            "Constrain the result to an upper bound?",
            False,
            doc=
            """Select *Yes* if you want the result to be constrained to an upper bound."""
            % globals(),
        )

        self.upper_bound = Float(
            "Enter the upper bound",
            1,
            doc="""Enter the upper bound of the result here.""",
        )

        self.rounding = Choice(
            "How should the output value be rounded?",
            ROUNDING,
            doc="""\
Choose how the values should be rounded- not at all, to a specified number of decimal places, 
to the next lowest integer ("floor rounding"), or to the next highest integer ("ceiling rounding").
Note that for rounding to an arbitrary number of decimal places, Python uses "round to even" rounding,
such that ties round to the nearest even number. Thus, 1.5 and 2.5 both round to to 2 at 0 decimal 
places, 2.45 rounds to 2.4, 2.451 rounds to 2.5, and 2.55 rounds to 2.6 at 1 decimal place. See the 
numpy documentation for more information.  
""",
        )

        # Decimal places for the rounding option above (negative rounds left
        # of the decimal point).
        self.rounding_digit = Integer(
            "Enter how many decimal places the value should be rounded to",
            0,
            doc="""\
Enter how many decimal places the value should be rounded to. 0 will round to an integer (e.g. 1, 2), 1 to 
one decimal place (e.g. 0.1, 0.2), -1 to one value before the decimal place (e.g. 10, 20), etc.
""",
        )