def create_settings(self):
     self.object_name = LabelSubscriber(
         "Input objects",
         "None",
         doc="""Enter the name of the objects whose population context is
         to be measured.""")
     self.operation = Choice(
         "Operation",
         choices=(O_POPULATION_DENSITY, O_DISTANCE_TO_EDGE, O_BOTH),
         doc="""Select the measurements you wish to perform. The choices
         are:<br><ul>
         <li><i>%(O_POPULATION_DENSITY)s</i> - calculate the population
         density within a radius from each cell.</li>
         <li><i>%(O_DISTANCE_TO_EDGE)s</i> - calculate the distance of
         each cell from the edge of a binary mask.</li>
          <li><i>%(O_BOTH)s</i> - make both measurements.</li></ul>""" % globals())
     self.radius = Integer("Search radius",
                           50,
                           minval=1,
                           doc="""Count all objects within this radius""")
     self.object_diameter = Integer(
         "Object diameter",
         20,
         minval=0,
         doc="""The average diameter of objects in the image. This number
         is used to adjust the area of the image to account for objects
         that would otherwise be excluded because they were touching
         the border.""")
     self.edge_image = ImageSubscriber(
         "Edge image",
         doc="""For measuring distance to an edge, this is the reference
         image. Cell distances will be computed to the nearest foreground / 
         background edge in the reference image.""")
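A minimal sketch of the population-density measurement described above, using scipy.spatial.cKDTree to count neighbors within the search radius (the object centers here are hypothetical stand-ins for the module's real inputs):

import numpy
from scipy.spatial import cKDTree

centers = numpy.array([[10.0, 12.0], [14.0, 15.0], [80.0, 90.0]])  # hypothetical object centers (y, x)
radius = 50  # mirrors the "Search radius" setting

tree = cKDTree(centers)
# query_ball_point returns, for each center, the indices of all centers
# within the radius; subtract 1 so an object does not count itself.
neighbors = tree.query_ball_point(centers, r=radius)
density = numpy.array([len(idx) - 1 for idx in neighbors])
print(density)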
Example #2
 def test_01_02_set_value(self):
     s = Integer("foo", value=5)
     for test_case in ("06", "-1"):
         s.value_text = test_case
         assert s == int(test_case)
         assert s.value_text == test_case
         s.test_valid(None)
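A hedged sketch of the failure path the test does not cover, assuming the CellProfiler 4 package layout (cellprofiler_core.setting.text.Integer and a re-exported ValidationError):

from cellprofiler_core.setting import ValidationError
from cellprofiler_core.setting.text import Integer

s = Integer("foo", value=5, minval=0)
s.value_text = "-1"  # the raw text is stored even when out of range
try:
    s.test_valid(None)  # validation compares the text against minval/maxval
except ValidationError as e:
    print("rejected:", e)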
Example #3
    def create_settings(self):
        super(FindMaxima, self).create_settings()

        self.min_distance = Integer(
            text="Minimum distance between maxima",
            value=5,
            minval=0,
            doc="""Choose the minimum distance between accepted local maxima"""
        )

        self.exclude_mode = Choice("Method for excluding background",
                                   [MODE_THRESHOLD, MODE_MASK, MODE_OBJECTS],
                                   value="Threshold",
                                   doc=f"""\
By default, local maxima will be searched for across the whole image. This means that maxima will be found in 
areas that consist entirely of background. To resolve this we have several methods to exclude background.

**{MODE_THRESHOLD}** allows you to specify a minimum pixel intensity to be considered as a peak. Setting this to 0
effectively uses no threshold.

**{MODE_MASK}** will restrict peaks to areas which are within a provided mask image. This mask will typically come from 
the threshold module or another means of finding background.

**{MODE_OBJECTS}** will restrict peaks to areas within an existing set of objects.
""")

        self.min_intensity = Float("Specify the minimum intensity of a peak",
                                   0,
                                   minval=0,
                                   maxval=99,
                                   doc="""\
Intensity peaks below this threshold value will be excluded. Use this to ensure that your local 
maxima are within objects of interest.""")

        self.mask_image = ImageSubscriber(
            "Select the image to use as a mask",
            doc=
            "Select the image you want to use. This should be a binary image.")

        self.mask_objects = LabelSubscriber(
            "Select the objects to search within",
            doc="Select the objects within which to search for peaks.")

        self.maxima_color = Color(
            "Select maxima preview color",
            "Blue",
            doc="Maxima will be displayed in this color.",
        )

        self.maxima_size = Integer(
            "Select maxima preview size",
            value=1,
            minval=1,
            doc="Size of the markers for each maximum in the preview. "
            "Positive pixels will be expanded by this radius. "
            "You may want to increase this when working with large images.",
        )

        self.spacer = Divider(line=True)
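FindMaxima's search can be pictured with skimage.feature.peak_local_max, whose parameters line up with the settings above (a stand-in sketch, not necessarily the module's exact call):

import numpy
from skimage.feature import peak_local_max

image = numpy.zeros((50, 50))
image[10, 10] = 1.0
image[30, 35] = 0.8

# min_distance mirrors "Minimum distance between maxima";
# threshold_abs mirrors the minimum peak intensity setting.
coords = peak_local_max(image, min_distance=5, threshold_abs=0.1)
print(coords)  # row/column coordinates of the accepted maxima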
Example #4
    def create_settings(self):
        self.skeleton_name = ImageSubscriber(
            "Select an image to measure"
        )

        self.radius = Integer(
            "Radius"
        )

        self.step = Integer(
            "Step"
        )
Example #5
    def create_settings(self):
        super(ResizeObjects, self).create_settings()

        self.method = Choice(
            "Method",
            ["Dimensions", "Factor", "Match Image"],
            doc="""\
The following options are available:

-  *Dimensions:* Enter the new height and width of the resized objects.
-  *Factor:* Enter a single value which specifies the scaling.
-  *Match Image:* Resize the objects to match the dimensions of another image.""",
            value="Factor",
        )

        self.factor = Float(
            "Factor",
            0.25,
            minval=0,
            doc="""\
*(Used only if resizing by "Factor")*

Numbers less than 1 will shrink the objects; numbers greater than 1 will
enlarge the objects.""",
        )

        self.width = Integer(
            "Width",
            100,
            minval=1,
            doc="""\
*(Used only if resizing by "Dimensions")*

Enter the desired width of the final objects, in pixels.""",
        )

        self.height = Integer(
            "Height",
            100,
            minval=1,
            doc="""\
*(Used only if resizing by "Dimensions")*

Enter the desired height of the final objects, in pixels.""",
        )

        self.specific_image = ImageSubscriber(
            "Select the image with the desired dimensions",
            "None",
            doc="""\
*(Used only if resizing by specifying desired final dimensions using an image)*

The input object set will be resized to the dimensions of the specified image.""",
        )
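A sketch of the "Factor" branch: resizing a label image needs nearest-neighbor interpolation (order=0), since scipy.ndimage.zoom would otherwise blend the integer labels:

import numpy
from scipy.ndimage import zoom

labels = numpy.zeros((8, 8), dtype=numpy.uint16)
labels[2:6, 2:6] = 3  # one object with label 3

# order=0 (nearest neighbor) preserves the discrete label values.
resized = zoom(labels, 0.5, order=0)
print(resized.shape, numpy.unique(resized))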
Example #6
def convert_java_type_to_setting(param_name, param_type, param_class):
    """
    Helper method to convert ImageJ/Java class parameter types to CellProfiler settings

    Parameters
    ----------
    param_name : str, required
        The name of the parameter
    param_type : str, required
        The Java class name describing the parameter type
    param_class: str, required
        One of INPUT_CLASS or OUTPUT_CLASS, based on the parameter use

    Returns
    -------
    A new Setting of a type appropriate for param_type, named with param_name. Or None if no valid conversion exists.
    """
    type_string = param_type.split()[1]
    img_strings = ("ij.ImagePlus", "net.imagej.Dataset", "net.imagej.ImgPlus")
    if INPUT_CLASS == param_class:
        param_label = param_name
        if type_string == "java.lang.String":
            return Alphanumeric(param_label, "")
        if type_string == "java.lang.Character":
            return Character(param_label, "")
        elif type_string == "java.lang.Integer":
            return Integer(param_label, 0, minval=-2 ** 31, maxval=((2 ** 31) - 1))
        elif type_string == "java.lang.Long":
            return Integer(param_label, 0, minval=-2 ** 63, maxval=((2 ** 63) - 1))
        elif type_string == "java.lang.Short":
            return Integer(param_label, 0, minval=-32768, maxval=32767)
        elif type_string == "java.lang.Byte":
            return Integer(param_label, 0, minval=-128, maxval=127)
        elif type_string == "java.lang.Boolean":
            return Boolean(param_label, 0)
        elif type_string == "java.lang.Float":
            return Float(param_label, minval=-2 ** 31, maxval=((2 ** 31) - 1))
        elif type_string == "java.lang.Double":
            return Float(param_label, minval=-2 ** 63, maxval=((2 ** 63) - 1))
        elif type_string == "java.io.File":
            return Filename(param_label, "")
        elif type_string in img_strings:
            return ImageSubscriber(param_label)
    elif OUTPUT_CLASS == param_class:
        if type_string in img_strings:
            return ImageName(
                "[OUTPUT, " + type_string + "] " + param_name,
                param_name,
                doc="""You may use this setting to rename the indicated output variable, if desired.""",
            )

    return None
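The integral branches above could equally be data-driven; a hypothetical standalone sketch of the same range dispatch as a lookup table:

JAVA_INT_RANGES = {
    "java.lang.Integer": (-2 ** 31, 2 ** 31 - 1),
    "java.lang.Long": (-2 ** 63, 2 ** 63 - 1),
    "java.lang.Short": (-32768, 32767),
    "java.lang.Byte": (-128, 127),
}

def int_bounds(type_string):
    """Return (minval, maxval) for a Java integral type, or None."""
    return JAVA_INT_RANGES.get(type_string)

print(int_bounds("java.lang.Short"))  # (-32768, 32767)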
Example #7
    def create_settings(self):

        self.input_object_name = LabelSubscriber(
            "Select objects to measure",
            "None",
            doc=
            """Select the objects whose radial entropy you want to measure.""")

        self.input_image_name = ImageSubscriber("Select an image to measure",
                                                "None",
                                                doc="""Select the
            grayscale image you want to measure the entropy of.""")

        self.bin_number = Integer(
            "Input number of bins",
            6,
            minval=3,
            maxval=60,
            doc=
            """Number of radial bins to divide your object into.  The minimum number
            of bins allowed is 3, the maximum number is 60.""")

        self.intensity_measurement = Choice(
            "Which intensity measurement should be used?",
            ['Mean', 'Median', 'Integrated'],
            value='Mean',
            doc="""
            Whether each wedge's mean, median, or integrated intensity
            should be used to calculate the entropy.""")
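A sketch of the entropy the docs describe: bin the per-wedge intensities, normalize them to a probability distribution, and take the Shannon entropy (numpy only; the wedge intensities are made up):

import numpy

wedge_intensity = numpy.array([4.0, 5.0, 3.5, 4.2, 4.8, 3.9])  # e.g. mean intensity per radial bin

p = wedge_intensity / wedge_intensity.sum()  # normalize to a distribution
entropy = -numpy.sum(p * numpy.log(p))       # Shannon entropy in nats
print(entropy)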
Example #8
    def add_bins_cb(self, can_remove=True):
        '''Add a histogram to the bin_groups collection

        can_remove - set this to False to keep from showing the "remove"
                     button for histograms that must be present.
        '''
        group = cps.SettingsGroup()
        if can_remove:
            group.append("divider", cps.Divider(line=False))
        group.append(
            'bins',
            Integer(
                "Number of bins",
                len(self.bins_groups) + 3,
                doc=
                """How much bins do you want in your histogram? You can calculate several histograms with different number of bins using the "Add another histogram" button."""
            ))

        if can_remove:
            group.append(
                "remover",
                cps.do_something.RemoveSettingButton("",
                                                     "Remove this histogram",
                                                     self.bins_groups, group))
        self.bins_groups.append(group)
Example #9
    def add_channel(self, can_remove=True):
        """Add another channel to the channels list"""
        group = SettingsGroup()
        group.can_remove = can_remove
        group.append(
            "channel_choice",
            Integer(
                text="Channel number",
                value=len(self.channels) + 1,
                minval=1,
                doc="""\
*(Used only when splitting images)*

This setting chooses a channel to be processed. For example, *1*
is the first
channel in a .TIF or the red channel in a traditional image file.
*2* and *3* are the second and third channels of a TIF or
the green and blue channels in other formats. *4* is the
transparency channel for image formats that support transparency and is
channel # 4 for a .TIF file. **ColorToGray** will fail to process an
image if you select a channel that is not supported by that image, for
example, “5” for a three-channel .PNG file.""",
            ),
        )

        group.append(
            "contribution",
            Float(
                "Relative weight of the channel",
                1,
                0,
                doc="""\
*(Used only when combining channels)*

Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.""",
            ),
        )

        group.append(
            "image_name",
            ImageName(
                "Image name",
                value="Channel%d" % (len(self.channels) + 1),
                doc="""\
*(Used only when splitting images)*

Select the name of the output grayscale image.""",
            ),
        )

        if group.can_remove:
            group.append(
                "remover",
                RemoveSettingButton("", "Remove this channel", self.channels, group),
            )
        self.channels.append(group)
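The 1-based "Channel number" setting maps onto 0-based numpy indexing; a minimal sketch of splitting one channel out of a color image:

import numpy

rgb = numpy.random.rand(16, 16, 3)  # synthetic color image

channel_number = 1                    # 1-based, as in the setting above
gray = rgb[:, :, channel_number - 1]  # channel 1 -> index 0 (red)
print(gray.shape)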
Example #10
    def create_settings(self):
        super(GaussianFilter, self).create_settings()

        self.sigma = Integer(
            text="Sigma",
            value=1,
            doc=
            "Standard deviation of the kernel to be used for blurring. Larger sigmas induce more blurring.",
        )
Example #11
    def create_settings(self):
        """Create the module settings and name the module"""
        self.wants_default_output_directory = Binary(
            "Store batch files in default output folder?",
            True,
            doc="""\
Select "*Yes*" to store batch files in the Default Output folder.
Select "*No*" to enter the path to the folder that will be used to
store these files. The Default Output folder can be set by clicking the "View output settings" button in the main CP window, or in CellProfiler Preferences. """
            % globals(),
        )

        self.custom_output_directory = Text(
            "Output folder path",
            get_default_output_directory(),
            doc=
            "Enter the path to the output folder. (Used only if not using the default output folder)",
        )

        # Worded this way not because I am windows-centric but because it's
        # easier than listing every other OS in the universe except for VMS
        self.remote_host_is_windows = Binary(
            "Are the cluster computers running Windows?",
            False,
            doc="""\
Select "*Yes*" if the cluster computers are running one of the
Microsoft Windows operating systems. In this case, **CreateBatchFiles**
will modify all paths to use the Windows file separator (backslash \\\\ ).
Select "*No*" for **CreateBatchFiles** to modify all paths to use the
Unix or Macintosh file separator (slash / ).""" % globals(),
        )

        self.batch_mode = Binary("Hidden: in batch mode", False)
        self.distributed_mode = Binary("Hidden: in distributed mode", False)
        self.default_image_directory = Setting(
            "Hidden: default input folder at time of save",
            get_default_image_directory(),
        )
        self.revision = Integer("Hidden: revision number", 0)
        self.from_old_matlab = Binary("Hidden: from old matlab", False)
        self.acknowledge_old_matlab = DoSomething(
            "Could not update CP1.0 pipeline to be compatible with CP2.0.  See module notes.",
            "OK",
            self.clear_old_matlab,
        )
        self.mappings = []
        self.add_mapping()
        self.add_mapping_button = DoSomething(
            "",
            "Add another path mapping",
            self.add_mapping,
            doc="""\
Use this option if another path must be mapped because there is a difference
between how the local computer sees a folder location vs. how the cluster
computer sees the folder location.""",
        )
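A sketch of the separator rewriting implied by remote_host_is_windows, using pathlib's pure path classes so no filesystem access is needed:

from pathlib import PurePosixPath, PureWindowsPath

local_path = "/data/run1/images"

# Re-express a POSIX-style path with Windows separators.
windows_path = str(PureWindowsPath(PurePosixPath(local_path)))
print(windows_path)  # \data\run1\images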
Example #12
    def create_settings(self):
        super(ReduceNoise, self).create_settings()

        self.size = Integer(
            text="Size", value=7, doc="Size of the patches to use for noise reduction."
        )

        self.distance = Integer(
            text="Distance",
            value=11,
            doc="Maximal distance in pixels to search for patches to use for denoising.",
        )

        self.cutoff_distance = Float(
            text="Cut-off distance",
            value=0.1,
            doc="""\
The permissiveness in accepting patches. Increasing the cut-off distance increases
the smoothness of the image. Likewise, decreasing the cut-off distance decreases the smoothness of the
image.
            """,
        )
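ReduceNoise's three settings line up with the parameters of non-local-means denoising; a sketch assuming skimage.restoration.denoise_nl_means:

import numpy
from skimage.restoration import denoise_nl_means

image = numpy.random.rand(64, 64)

# patch_size ~ "Size", patch_distance ~ "Distance", h ~ "Cut-off distance".
denoised = denoise_nl_means(image, patch_size=7, patch_distance=11, h=0.1)
print(denoised.shape)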
Example #13
    def create_settings(self):
        # choose the tracked objects to measure TrAM on
        self.object_name = LabelSubscriber(
                "Tracked objects", "None", doc="""
            Select the tracked objects for computing TrAM.""")

        # which measurements will go into the TrAM computation
        self.tram_measurements = MeasurementMultiChoiceForCategory(
            "TrAM measurements", category_chooser=self.object_name, doc="""
            These are measurements for the selected tracked objects which
            will be used in the TrAM computation. At least one must be selected.""")

        # Treat X-Y value pairs as isotropic in the TrAM measure?
        self.isotropic = cps.Binary(
            'Isotropic XY metric?', True, doc="""
            If selected (the default) then measurements that are available
            as X-Y pairs (e.g. location) will have an isotropic
            metric applied in TrAM. Note that the X-Y-Z extension of this feature
            is not currently available.
            """)

        # number of spline knots
        self.num_knots = Integer(
            "Number of spline knots", 4, minval=self.MIN_NUM_KNOTS, doc="""
            The number of knots (independent values) used
            when computing smoothing splines. This should be around 1/5th the number
            of frames for reasonably oversampled time lapse sequences, and must be 3
            or greater. It is approximately the maximum number of wiggles expected in
            well-tracked trajectories.
            """)

        # TrAM exponent
        self.tram_exponent = Float(
            "TrAM exponent", 0.5, minval=0.01, maxval=1.0, doc="""
            This number is between 0.01 and 1 (default 0.5), and specifies how
            strongly simultaneous sudden changes in multiple features synergize in
            the TrAM metric. A lower value signifies higher synergy (at the risk of
            missing tracking failures that are reflected in only some of the features).
            """)
Example #14
    def create_settings(self):
        super(MedianFilter, self).create_settings()

        self.window = Integer(
            text="Window",
            value=3,
            minval=0,
            doc="""\
Dimension in each direction for computing the median filter. Use a window with a small size to
remove noise that's small in size. A larger window will remove larger scales of noise at the
risk of blurring other features.
""",
        )
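The window setting corresponds to the size argument of a standard median filter; a sketch with scipy.ndimage:

import numpy
from scipy.ndimage import median_filter

image = numpy.random.rand(32, 32)

# size=3 filters over a 3x3 window, matching the "Window" setting above.
smoothed = median_filter(image, size=3)
print(smoothed.shape)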
Example #15
    def create_settings(self):
        #
        # The ImageNameSubscriber "subscribes" to all ImageNameProviders in
        # prior modules. Modules before yours will put images into CellProfiler.
        # The ImageSubscriber gives your user a list of these images
        # which can then be used as inputs in your module.
        #
        self.input_image_name = ImageSubscriber(
            # The text to the left of the edit box
            text="Input image name:",
            # reST help that gets displayed when the user presses the
            # help button to the right of the edit box.
            doc="""\
This is the image that the module operates on. You can choose any image
that is made available by a prior module.

**MeasurementTemplate** will measure something about this image.
""",
        )

        #
        # The ObjectNameSubscriber is similar to the ImageNameSubscriber.
        # It will ask the user which object to pick from the list of
        # objects provided by upstream modules.
        #
        self.input_object_name = LabelSubscriber(
            text="Input object name",
            doc="These are the objects that the module operates on.",
        )

        #
        # The radial degree is the "N" parameter in the Zernike - how many
        # inflection points there are, radiating out from the center. Higher
        # N means more features and a more detailed description
        #
        # The setting is an integer setting, bounded between 1 and 50.
        # N = 50 generates 1200 features!
        #
        self.radial_degree = Integer(
            text="Radial degree",
            value=10,
            minval=1,
            maxval=50,
            doc="""\
Calculate all Zernike features up to the given radial
degree. The Zernike function is parameterized by a radial
and azimuthal degree. The module will calculate all Zernike
features for all azimuthal degrees up to and including the
radial degree you enter here.
""",
        )
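The module itself uses CellProfiler's own Zernike code; as an outside illustration only, and assuming the mahotas package is available, a comparable computation looks like this:

import numpy
from mahotas.features import zernike_moments

# A filled disk as a toy object mask.
y, x = numpy.mgrid[:64, :64]
mask = ((y - 32) ** 2 + (x - 32) ** 2) < 20 ** 2

# degree plays the role of the "Radial degree" setting above.
features = zernike_moments(mask.astype(float), radius=20, degree=10)
print(len(features))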
Example #16
 def create_settings(self):
     """Create the settings for the module at startup.
     
     The module allows for an unlimited number of measured objects, each
     of which has an entry in self.object_groups.
     """ 
     self.image_groups = []
     self.object_groups = []
     self.scale_groups = []
     self.image_count = cps.HiddenCount(self.image_groups)
     self.object_count = cps.HiddenCount(self.object_groups)
     self.scale_count = cps.HiddenCount(self.scale_groups)
     self.add_image_cb(can_remove = False)
     self.add_images = DoSomething("", "Add another image",
                                       self.add_image_cb)
     self.image_divider = cps.Divider()
     self.add_object_cb(can_remove = True)
     self.add_objects = DoSomething("", "Add another object",
                                        self.add_object_cb)
     self.object_divider = cps.Divider()
     self.add_scale_cb(can_remove = False)
     self.add_scales = DoSomething("", "Add another scale",
                                       self.add_scale_cb)
     self.scale_divider = cps.Divider()
     
     self.wants_gabor = cps.Binary(
         "Measure Gabor features?", True, doc =
         """The Gabor features measure striped texture in an object. They
         take a substantial time to calculate. Check this setting to
         measure the Gabor features. Uncheck this setting to skip
         the Gabor feature calculation if it is not informative for your
         images""")
     self.gabor_angles = Integer("Number of angles to compute for Gabor", 4, 2, doc="""
     <i>(Used only if Gabor features are measured)</i><br>
     How many angles do you want to use for each Gabor texture measurement?
         The default value is 4 which detects bands in the horizontal, vertical and diagonal
         orientations.""")
     self.gabor_divider = cps.Divider()
     
     self.wants_tamura = cps.Binary(
         "Measure Tamura features?", True, doc =
         """The Tamura features are very ugly.""")
     self.tamura_feats = MultiChoice(
                 "Features to compute", F_ALL, F_ALL,
                 doc = """Tamura Features:
                     <p><ul>
                     <li><i>%(F_1)s</i> - bla.</li>
                     <li><i>%(F_2)s</i> - bla.</li>
                     <li><i>%(F_3)s</i> - bla.</li>
                     </ul><p>
                     Choose one or more features to compute.""" % globals())           
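A sketch of the Gabor measurement over several angles, assuming skimage.filters.gabor; four evenly spaced angles mirror the gabor_angles default:

import numpy
from skimage.filters import gabor

image = numpy.random.rand(32, 32)

# Four angles: horizontal, diagonal, vertical, anti-diagonal.
for theta in numpy.linspace(0, numpy.pi, 4, endpoint=False):
    real, imag = gabor(image, frequency=0.3, theta=theta)
    print(theta, numpy.abs(real).mean())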
Example #17
    def create_settings(self):
        super(ConvertImageToObjects, self).create_settings()

        self.cast_to_bool = Binary(text="Convert to boolean image",
                                   value=True,
                                   doc=HELP_BINARY_IMAGE)

        self.preserve_labels = Binary(
            text="Preserve original labels",
            value=False,
            doc="""\
By default, this module will re-label the input image.
Setting this to *{YES}* will ensure that the original labels 
(i.e. pixel values of the objects) are preserved.
""".format(**{"YES": "Yes"}),
        )

        self.background_label = Integer(
            text="Background label",
            value=0,
            doc="""\
Consider all pixels with this value as background pixels, and label them as 0. 
By default, 0-valued pixels are considered as background pixels.
""",
        )

        self.connectivity = Integer(
            text="Connectivity",
            minval=0,
            value=0,
            doc="""\
Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. 
Accepted values range from 1 to the number of dimensions of the input.
If set to 0, a full connectivity of the input dimension is used.
""",
        )
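The background and connectivity settings map directly onto skimage.measure.label; a sketch showing how connectivity changes the labeling of diagonally touching pixels:

import numpy
from skimage.measure import label

image = numpy.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
])

# connectivity=1 counts only orthogonal neighbors (two objects here);
# connectivity=2 is full connectivity in 2-D and joins the diagonal (one object).
print(label(image, background=0, connectivity=1))
print(label(image, background=0, connectivity=2))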
Example #18
    def add_scale(self, removable=True):
        """

        Add a scale to the scale_groups collection

        :param removable: set this to False to keep from showing the "remove" button for scales that must be present.

        """
        group = SettingsGroup()

        if removable:
            group.append("divider", Divider(line=False))

        scale = Integer(
            doc="""\
You can specify the scale of texture to be measured, in pixel units; the
texture scale is the distance between correlated intensities in the
image. A higher number for the scale of texture measures larger patterns
of texture whereas smaller numbers measure more localized patterns of
texture. It is best to measure texture on a scale smaller than your
objects’ sizes, so be sure that the value entered for scale of texture
is smaller than most of your objects. For very small objects (smaller
than the scale of texture you are measuring), the texture cannot be
measured and will result in an undefined value in the output file.
""",
            text="Texture scale to measure",
            value=len(self.scale_groups) + 3,
        )

        group.append("scale", scale)

        if removable:
            remove_setting = RemoveSettingButton(entry=group,
                                                 label="Remove this scale",
                                                 list=self.scale_groups,
                                                 text="")

            group.append("remover", remove_setting)

        self.scale_groups.append(group)
Example #19
 def add_scale_cb(self, can_remove = True):
     '''Add a scale to the scale_groups collection
     
     can_remove - set this to False to keep from showing the "remove"
                  button for scales that must be present.
     '''
     group = cps.SettingsGroup()
     if can_remove:
         group.append("divider", cps.Divider(line=False))
     group.append('scale', 
                  Integer("Texture scale to measure",
                              len(self.scale_groups)+3,
                              doc="""You can specify the scale of texture to be measured, in pixel units; 
                              the texture scale is the distance between correlated intensities in the image. A 
                              higher number for the scale of texture measures larger patterns of 
                              texture whereas smaller numbers measure more localized patterns of 
                              texture. It is best to measure texture on a scale smaller than your 
                              objects' sizes, so be sure that the value entered for scale of texture is 
                              smaller than most of your objects. For very small objects (smaller than 
                              the scale of texture you are measuring), the texture cannot be measured 
                               and will result in an undefined value in the output file."""))
     group.append('angles', MultiChoice(
         "Angles to measure", H_ALL, H_ALL,
     doc = """The Haralick texture measurements are based on the correlation
     between pixels offset by the scale in one of four directions:
     <p><ul>
     <li><i>%(H_HORIZONTAL)s</i> - the correlated pixel is "scale" pixels
     to the right of the pixel of interest.</li>
     <li><i>%(H_VERTICAL)s</i> - the correlated pixel is "scale" pixels
     below the pixel of interest.</li>
     <li><i>%(H_DIAGONAL)s</i> - the correlated pixel is "scale" pixels
     to the right and "scale" pixels below the pixel of interest.</li>
     <li><i>%(H_ANTIDIAGONAL)s</i> - the correlated pixel is "scale"
     pixels to the left and "scale" pixels below the pixel of interest.</li>
     </ul><p>
     Choose one or more directions to measure.""" % globals()))
                             
     if can_remove:
         group.append("remover", cps.do_something.RemoveSettingButton("", "Remove this scale", self.scale_groups, group))
     self.scale_groups.append(group)
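The four Haralick directions above correspond to gray-level co-occurrence angles; a sketch using skimage.feature.graycomatrix (spelled greycomatrix in older scikit-image releases):

import numpy
from skimage.feature import graycomatrix

image = (numpy.random.rand(32, 32) * 8).astype(numpy.uint8)

scale = 3  # "Texture scale to measure"
# 0, 45, 90, 135 degrees ~ horizontal, diagonal, vertical, anti-diagonal.
angles = [0, numpy.pi / 4, numpy.pi / 2, 3 * numpy.pi / 4]
glcm = graycomatrix(image, distances=[scale], angles=angles, levels=8)
print(glcm.shape)  # (levels, levels, n_distances, n_angles)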
Example #20
    def create_settings(self):
        """Create the settings & name the module"""
        self.images_list = ImageListSubscriber(
            "Select images to measure",
            [],
            doc=
            """Select the grayscale images whose intensity you want to measure.""",
        )

        self.divider = Divider(line=False)
        self.wants_objects = Binary(
            "Measure the intensity only from areas enclosed by objects?",
            False,
            doc="""\
Select *Yes* to measure only those pixels within an object type you
choose, identified by a prior module. Note that this module will
aggregate intensities across all objects in the image: to measure each
object individually, see **MeasureObjectIntensity** instead.
""",
        )

        self.objects_list = LabelListSubscriber(
            "Select input object sets",
            [],
            doc=
            """Select the object sets whose intensity you want to measure.""",
        )

        self.nchannels = Integer(
            "How many channels does the image have?",
            1,
            doc="""
            Indicate how many planes this image has. This is needed because
            the CellProfiler pipeline needs to be independent of the actual
            image data.
            """,
        )
Example #21
    def create_settings(self):
        self.images_list = ImageListSubscriber(
            "Select images to measure",
            [],
            doc=
            """Select the grayscale images whose intensity you want to measure.""",
        )
        self.divider = Divider()
        self.objects_list = LabelListSubscriber(
            "Select objects to measure",
            [],
            doc=
            """Select the object sets whose intensity you want to measure.""",
        )

        self.nchannels = Integer(
            "How many channels does the image have?",
            1,
            doc="""
            Indicate how many planes this image has. This is needed because
            the CellProfiler pipeline needs to be independent of the actual
            image data.
            """,
        )
Example #22
    def create_settings(self):
        """Create the settings for the module

        Create the settings for the module during initialization.
        """
        self.image_name = ImageSubscriber(
            "Select the input image",
            "None",
            doc="""\
The name of a binary image from a previous module. **IdentifyDeadWorms**
will use this image to establish the foreground and background for the
fitting operation. You can use **ApplyThreshold** to threshold a
grayscale image and create the binary mask. You can also use a module
such as **IdentifyPrimaryObjects** to label each worm and then use
**ConvertObjectsToImage** to make the result a mask.
""",
        )

        self.object_name = LabelName(
            "Name the dead worm objects to be identified",
            "DeadWorms",
            doc="""\
This is the name for the dead worm objects. You can refer
to this name in subsequent modules such as
**IdentifySecondaryObjects**""",
        )

        self.worm_width = Integer(
            "Worm width",
            10,
            minval=1,
            doc="""\
This is the width (the short axis), measured in pixels,
of the diamond used as a template when
matching against the worm. It should be less than the width
of a worm.""",
        )

        self.worm_length = Integer(
            "Worm length",
            100,
            minval=1,
            doc="""\
This is the length (the long axis), measured in pixels,
of the diamond used as a template when matching against the
worm. It should be less than the length of a worm.""",
        )

        self.angle_count = Integer(
            "Number of angles",
            32,
            minval=1,
            doc="""\
This is the number of different angles at which the template will be
tried. For instance, if there are 12 angles, the template will be
rotated by 0°, 15°, 30°, 45° … 165°. The shape is bilaterally symmetric;
that is, you will get the same shape after rotating it by 180°.
""",
        )

        self.wants_automatic_distance = Binary(
            "Automatically calculate distance parameters?",
            True,
            doc="""\
This setting determines whether or not **IdentifyDeadWorms**
automatically calculates the parameters used to determine whether two
found-worm centers belong to the same worm.

Select "*Yes*" to have **IdentifyDeadWorms** automatically calculate
the distance from the worm length and width. Select "*No*" to set the
distances manually.
"""
            % globals(),
        )

        self.space_distance = Float(
            "Spatial distance",
            5,
            minval=1,
            doc="""\
*(Used only if not automatically calculating distance parameters)*

Enter the distance for calculating the worm centers, in units of pixels.
The worm centers must be at least this many pixels apart for the centers to
be considered two separate worms.
""",
        )

        self.angular_distance = Float(
            "Angular distance",
            30,
            minval=1,
            doc="""\
*(Used only if not automatically calculating distance parameters)*

**IdentifyDeadWorms** calculates the worm centers at different angles.
Two worm centers are considered to represent different worms if their
angular distance is larger than this number. The number is measured in
degrees.
""",
        )
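A sketch of how the "Number of angles" setting turns into template rotations; with 12 angles the steps are 15 degrees apart, matching the doc text above:

import numpy

angle_count = 12
# The diamond template is bilaterally symmetric, so only [0, 180) degrees are needed.
angles = numpy.linspace(0, 180, angle_count, endpoint=False)
print(angles)  # [  0.  15.  30. ... 165.]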
Example #23
    def create_settings(self):
        #
        # The ImageSubscriber "subscribes" to all ImageNameProviders in
        # prior modules. Modules before yours will put images into CellProfiler.
        # The ImageSubscriber gives your user a list of these images
        # which can then be used as inputs in your module.
        #
        self.input_image_name = ImageSubscriber(
            # The text to the left of the edit box
            "Input image name",
            # HTML help that gets displayed when the user presses the
            # help button to the right of the edit box
            doc="""This is the image that the module operates on. You can
            choose any image that is made available by a prior module.
            <br>
            <b>ImageTemplate</b> will do something to this image.
            """)
        #
        # The text.ImageName makes the image available to subsequent
        # modules.
        #
        self.output_image_name = ImageName(
            "Output image name",
            # The second parameter holds a suggested name for the image.
            "OutputImage",
            doc="""This is the image resulting from the operation.""")
        #
        # Here's a choice box - the user gets a drop-down list of what
        # can be done.
        #
        self.transform_choice = Choice(
            "Transform choice",
            # The choice takes a list of possibilities. The first one
            # is the default - the one the user will typically choose.
            [
                M_FOURIER, M_SIMONCELLI_P, M_SIMONCELLI_R, M_TEST_FOURIER,
                M_TEST_SIMONCELLI_P, M_TEST_SIMONCELLI_R, M_HAAR_S, M_HAAR_T,
                M_TEST_HAAR, M_CHEBYSHEV_T
            ],
            #
            # Here, in the documentation, we do a little trick so that
            # we use the actual text that's displayed in the documentation.
            #
            # %(GRADIENT_MAGNITUDE)s will get changed into "Gradient magnitude"
            # etc. Python will look in globals() for the "GRADIENT_" names
            # and paste them in where it sees %(GRADIENT_...)s
            #
            # The <ul> and <li> tags make a neat bullet-point list in the docs
            #
            doc='''There are several transforms available:
             <ul><li><i>Fourier Transform:</i> Blabla. </li>
             <li><i>Wavelet Transform:</i> Blabla. </li>
             <li><i>Chebyshev Transform:</i> Blabla. </li></ul>''' % globals())
        #
        # We use an integer setting so that the user can give us a number
        # for the scale. The control will turn red if the user types in
        # an invalid scale.
        #
        self.scale = Integer(
            "Scale",
            # The default value is 3
            3,
            # We don't let the user type in really small values
            minval=1,
            # or large values
            maxval=100,
            doc="""This is a scaling factor that supplies the sigma for
            a gaussian that's used to smooth the image. The gradient is
            calculated on the smoothed image, so large scales will give
            you long-range gradients and small scales will give you
            short-range gradients""")

        self.M = Integer(
            "Order",
            # The default value is 0
            0,
            # We don't let the user type in really small values
            minval=0,
            # or large values
            maxval=50,
            doc=
            """This is the order of the Chebyshev Transform. A value of 0 will use the order matching the image dimensions."""
        )
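The scale doc says it supplies the sigma of a Gaussian smoother; a sketch with scipy.ndimage, followed by a gradient on the smoothed image:

import numpy
from scipy.ndimage import gaussian_filter

image = numpy.random.rand(64, 64)

scale = 3  # the "Scale" setting above
smoothed = gaussian_filter(image, sigma=scale)
gy, gx = numpy.gradient(smoothed)  # larger scales give longer-range gradients
print(numpy.hypot(gy, gx).mean())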
Example #24
    def create_settings(self):
        """Create your settings by subclassing this function

        create_settings is called at the end of initialization.
        """
        self.grid_name = GridSubscriber(
            "Select the defined grid",
            "None",
            doc=
            """Select the name of a grid created by a previous **DefineGrid** module.""",
        )

        self.output_objects_name = LabelName(
            "Name the objects to be identified",
            "Wells",
            doc="""\
Enter the name of the grid objects identified by this module. These objects
will be available for further measurement and processing in subsequent modules.""",
        )

        self.shape_choice = Choice(
            "Select object shapes and locations",
            [
                SHAPE_RECTANGLE, SHAPE_CIRCLE_FORCED, SHAPE_CIRCLE_NATURAL,
                SHAPE_NATURAL
            ],
            doc="""\
Use this setting to choose the method to be used to determine the grid
objects’ shapes and locations:

-  *%(SHAPE_RECTANGLE)s:* Each object will be created as a rectangle,
   completely occupying the entire grid compartment (rectangle). This
   option creates the rectangular objects based solely on the grid’s
   specifications, not on any previously identified guiding objects.
-  *%(SHAPE_CIRCLE_FORCED)s:* Each object will be created as a circle,
   centered in the middle of each grid compartment. This option places
   the circular objects’ locations based solely on the grid’s
   specifications, not on any previously identified guiding objects. The
   radius of all circles in a grid will be constant for the entire grid
   in each image cycle, and can be determined automatically for each
   image cycle based on the average radius of previously identified
   guiding objects for that image cycle, or instead it can be specified
   as a single radius for all circles in all grids in the entire
   analysis run.
-  *%(SHAPE_CIRCLE_NATURAL)s:* Each object will be created as a
   circle, and each circle’s location within its grid compartment will
   be determined based on the location of any previously identified
   guiding objects within that grid compartment. Thus, if a guiding
   object lies within a particular grid compartment, that object’s
   center will be the center of the created circular object. If no
   guiding objects lie within a particular grid compartment, the
   circular object is placed within the center of that grid compartment.
   If more than one guiding object lies within the grid compartment,
   they will be combined and the centroid of this combined object will
   be the location of the created circular object. Note that guiding
   objects whose centers are close to the grid edge are ignored.
-  *%(SHAPE_NATURAL)s:* Within each grid compartment, the object will
   be identified based on combining all of the parts of guiding objects,
   if any, that fall within the grid compartment. Note that guiding
   objects whose centers are close to the grid edge are ignored. If a
   guiding object does not exist within a grid compartment, an object
   consisting of one single pixel in the middle of the grid compartment
   will be created.
""" % globals(),
        )

        self.diameter_choice = Choice(
            "Specify the circle diameter automatically?",
            [AM_AUTOMATIC, AM_MANUAL],
            doc="""\
*(Used only if "Circle" is selected as object shape)*

There are two methods for selecting the circle diameter:

-  *%(AM_AUTOMATIC)s:* Uses the average diameter of previously
   identified guiding objects as the diameter.
-  *%(AM_MANUAL)s:* Lets you specify the diameter directly, as a
   number.
""" % globals(),
        )

        self.diameter = Integer(
            "Circle diameter",
            20,
            minval=2,
            doc="""\
*(Used only if "Circle" is selected as object shape and diameter is
specified manually)*

Enter the diameter to be used for each grid circle, in pixels.
{dist}
""".format(dist=HELP_ON_MEASURING_DISTANCES),
        )

        self.guiding_object_name = LabelSubscriber(
            "Select the guiding objects",
            "None",
            doc="""\
*(Used only if "Circle" is selected as object shape and diameter is
specified automatically, or if "Natural Location" is selected as the
object shape)*

Select the names of previously identified objects that will be used to
guide the shape and/or location of the objects created by this module,
depending on the method chosen.
""",
        )
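A sketch of the "Circle (forced location)" branch: stamp a fixed-diameter circle at each grid-compartment center, assuming skimage.draw.disk (the grid centers here are hypothetical):

import numpy
from skimage.draw import disk

labels = numpy.zeros((60, 90), dtype=numpy.uint16)
diameter = 20  # the "Circle diameter" setting above

# Hypothetical 2x3 grid of compartment centers.
centers = [(15, 15), (15, 45), (15, 75), (45, 15), (45, 45), (45, 75)]
for n, center in enumerate(centers, start=1):
    rr, cc = disk(center, diameter / 2, shape=labels.shape)
    labels[rr, cc] = n  # one object per grid compartment
print(labels.max())  # 6 objects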
Example #25
class MeasureTrackQuality(cpm.Module):
    module_name = "MeasureTrackQuality"
    category = "Measurement"
    variable_revision_number = 1

    CAT_MEASURE_TRACK_QUALITY = "MeasureTrackQuality"
    MEAS_TRAM = "TrAM"
    MEAS_LABELS = "Labels"
    MEAS_PARENT = "Is_Parent"
    MEAS_SPLIT = "Split_Trajectory"
    FULL_TRAM_MEAS_NAME = "{}_{}".format(CAT_MEASURE_TRACK_QUALITY, MEAS_TRAM)
    FULL_LABELS_MEAS_NAME = "{}_{}".format(CAT_MEASURE_TRACK_QUALITY, MEAS_LABELS)
    FULL_PARENT_MEAS_NAME = "{}_{}".format(CAT_MEASURE_TRACK_QUALITY, MEAS_PARENT)
    FULL_SPLIT_MEAS_NAME = "{}_{}".format(CAT_MEASURE_TRACK_QUALITY, MEAS_SPLIT)
    IMAGE_NUM_KEY = "Image"
    MIN_NUM_KNOTS = 3

    LABELS_KEY = "labels"
    IMAGE_NUMS_KEY = "image_nums"
    OBJECT_NUMS_KEY = "object_nums"
    PARENT_OBJECT_NUMS_KEY = "parent_object_nums"
    TRAM_KEY = "TrAM"
    SPLIT_KEY = "split"
    PARENT_KEY = "parent"

    def create_settings(self):
        # choose the tracked objects to measure TrAM on
        self.object_name = LabelSubscriber(
                "Tracked objects", "None", doc="""
            Select the tracked objects for computing TrAM.""")

        # which measurements will go into the TrAM computation
        self.tram_measurements = MeasurementMultiChoiceForCategory(
            "TrAM measurements", category_chooser=self.object_name, doc="""
            These are measurements for the selected tracked objects which
            will be used in the TrAM computation. At least one must be selected.""")

        # Treat X-Y value pairs as isotropic in the TrAM measure?
        self.isotropic = cps.Binary(
            'Isotropic XY metric?', True, doc="""
            If selected (the default) then measurements that are available
            as X-Y pairs (e.g. location) will have an isotropic
            metric applied in TrAM. Note that the X-Y-Z extension of this feature
            is not currently available.
            """)

        # number of spline knots
        self.num_knots = Integer(
            "Number of spline knots", 4, minval=self.MIN_NUM_KNOTS, doc="""
            The number of knots (independent values) used
            when computing smoothing splines. This should be around 1/5th the number
            of frames for reasonably oversampled time lapse sequences, and must be 3
            or greater. It is approximately the maximum number of wiggles expected in
            well-tracked trajectories.
            """)

        # TrAM exponent
        self.tram_exponent = Float(
            "TrAM exponent", 0.5, minval=0.01, maxval=1.0, doc="""
            This number is between 0.01 and 1 (default 0.5), and specifies how
            strongly simultaneous sudden changes in multiple features synergize in
            the TrAM metric. A lower value signifies higher synergy (at the risk of
            missing tracking failures that are reflected in only some of the features).
            """)

    def settings(self):
        return [self.object_name, self.tram_measurements, self.isotropic, self.num_knots, self.tram_exponent]

    def validate_module(self, pipeline):
        '''Make sure that the user has selected at least one measurement for TrAM and that there are tracking data.'''
        if len(self.get_selected_tram_measurements()) == 0:
            raise cps.ValidationError(
                    "Please select at least one TrAM measurement for tracking of {}".format(self.object_name.value),
                    self.tram_measurements)

        # check on available tracking columns for the selected object
        obj_name = self.object_name.value
        mc = pipeline.get_measurement_columns()
        num_tracking_cols = len([entry for entry in mc if entry[0] == obj_name and entry[1].startswith(trackobjects.F_PREFIX)])
        if num_tracking_cols == 0:
            msg = "No {} data available for {}. Please select an object with tracking data.".format(trackobjects.F_PREFIX, obj_name)
            raise cps.ValidationError(msg, self.object_name)

    def run(self, workspace):
        pass

    def display_post_group(self, workspace, figure):
        if self.show_window:
            figure.set_subplots((1,1))
            figure.subplot_histogram(0, 0, workspace.display_data.tram_values, bins=40, xlabel="TrAM",
                                     title="TrAM for {}".format(self.object_name.value))

    def post_group(self, workspace, grouping):
        self.show_window = True

        measurements = workspace.measurements
        obj_name = self.object_name.value # the object the user has selected

        # get the image numbers
        group_number = grouping["Group_Number"]
        groupings = workspace.measurements.get_groupings(grouping)
        img_numbers = sum([numbers for group, numbers in groupings if int(group["Group_Number"]) == group_number], [])

        num_images = len(img_numbers)
        if num_images < TRAM_MIN_TIME_POINTS:
            logger.warning("Need at least {} time points to compute TrAM. Found {}."
                           .format(TRAM_MIN_TIME_POINTS, num_images))

        # get vector of tracking label for each data point
        feature_names = measurements.get_feature_names(obj_name)
        tracking_label_feature_name = [name for name in feature_names
                                       if name.startswith("{}_{}".format(trackobjects.F_PREFIX, trackobjects.F_LABEL))][0]
        label_vals = measurements.get_measurement(obj_name, tracking_label_feature_name, img_numbers)
        label_vals_flattened_all = numpy.concatenate(label_vals).ravel().tolist()
        # determine which indexes we should keep. Get rid of any nan label values
        not_nan_indices = [i for i, label in enumerate(label_vals_flattened_all) if not numpy.isnan(label)]
        label_vals_flattened = [label_vals_flattened_all[i] for i in not_nan_indices] # excludes nan

        # convenience function to flatten and remove values corresponding to nan labels
        def extract_flattened_measurements_for_valid_labels(lol):
            return [numpy.concatenate(lol).tolist()[i] for i in not_nan_indices]

        # function to get a tuple dictionary entry relating feature name with data values
        def get_feature_values_tuple(sel):
            feat_obj_name, feat_name = sel.split("|")
            vals = measurements.get_measurement(feat_obj_name, feat_name, measurements.get_image_numbers())
            vals_flattened = extract_flattened_measurements_for_valid_labels(vals)
            return (feat_name, vals_flattened)

        # get all the data for TrAM
        selections = self.get_selected_tram_measurements() # measurements that the user wants to run TrAM on
        all_values_dict = dict(get_feature_values_tuple(sel) for sel in selections)
        # determine if there are any potential isotropic (XY) pairs
        if self.isotropic.value:
            isotropic_pairs = MeasureTrackQuality.Determine_Isotropic_pairs(all_values_dict.keys())
        else:
            isotropic_pairs = []

        # sanity check: make sure all vectors have the same length
        vec_lengths = set([len(value) for value in all_values_dict.values()])
        assert len(vec_lengths) == 1, "Measurement vectors have differing lengths"

        # get vector of image numbers into the dict
        counts = [len([v for v in x if not numpy.isnan(v)]) for x in label_vals] # number of non-nan labels at each time point
        image_vals = [[image for _ in range(count)] for image, count in zip(img_numbers, counts)] # repeat image number
        image_vals_flattened = sum(image_vals, [])

        # determine max lifetime by label so we can select different object behaviors
        lifetime_feature_name = [name for name in feature_names
                                 if name.startswith("{}_{}".format(trackobjects.F_PREFIX, trackobjects.F_LIFETIME))][0]
        lifetime_vals_flattened =\
            extract_flattened_measurements_for_valid_labels(measurements.get_measurement(obj_name,
                                                                                         lifetime_feature_name,
                                                                                         img_numbers))
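        # zip() pairs each label with its lifetime, and groupby buckets
        # consecutive (label, lifetime) tuples that share a label. max() over
        # a bucket compares tuples (the label is constant within a bucket),
        # so dict() receives (label, max_lifetime) pairs keyed by label.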
        max_lifetime_by_label = dict(max(lifetimes)
                                     for label, lifetimes
                                     in itertools.groupby(zip(label_vals_flattened, lifetime_vals_flattened),
                                                          lambda x: x[0]))


        # Labels for objects that are tracked the whole time.
        label_counts = Counter(label_vals_flattened) # dict with count of each label
        labels_for_complete_trajectories = [label for label in max_lifetime_by_label.keys()
                                            if max_lifetime_by_label[label] == num_images
                                            and label_counts[label] == num_images]
        # labels for objects there the whole time but result from splitting
        labels_for_split_trajectories = [label for label in max_lifetime_by_label.keys()
                                         if max_lifetime_by_label[label] == num_images
                                         and label_counts[label] > num_images
                                         and not numpy.isnan(label)]


        # create dictionary to translate from label to object number in last frame. This is how we will store results.
        object_nums = measurements.get_measurement(obj_name, M_NUMBER_OBJECT_NUMBER, img_numbers) # list of lists
        object_nums_flattened = extract_flattened_measurements_for_valid_labels(object_nums)
        object_count_by_image = {img_num:len(v) for img_num, v in zip(img_numbers, object_nums)}

        # create a mapping from object number in an image to its index in the data array for later
        index_by_img_and_object = {(img_num, obj_num): index for img_num, obj_nums in zip(img_numbers, object_nums)
                                   for index, obj_num in enumerate(obj_nums)}

        # now restrict vectors only to labels of complete trajectories
        complete_trajectory_indices = [i for i, label in enumerate(label_vals_flattened) if label in labels_for_complete_trajectories]
        all_values_dict_complete_trajectories = {k : [v[i] for i in complete_trajectory_indices] for k, v in all_values_dict.items()}

        # compute typical inter-timepoint variation for complete trajectories only.
        label_vals_flattened_complete_trajectories = [label_vals_flattened[i] for i in complete_trajectory_indices]
        image_vals_flattened_complete_trajectories = [image_vals_flattened[i] for i in complete_trajectory_indices]
        tad = MeasureTrackQuality.compute_typical_deviations(all_values_dict_complete_trajectories,
                                                               label_vals_flattened_complete_trajectories,
                                                               image_vals_flattened_complete_trajectories)

        # put all the data into a 2D array and normalize by typical deviations
        all_data_array = numpy.column_stack(all_values_dict.values())
        tram_feature_names = all_values_dict_complete_trajectories.keys()
        inv_devs = numpy.diag([1 / tad[k] for k in tram_feature_names]) # diagonal matrix of inverse typical deviation
        normalized_all_data_array = numpy.dot(all_data_array, inv_devs) # perform the multiplication

        # this is how we identify our TrAM measurements to objects
        next_available_tram_label = 0

        # Compute TrAM for each complete trajectory. Store result in tram_dict using TrAM label as key.
        tram_dict = dict()
        for label in labels_for_complete_trajectories:
            indices = [i for i, lab in enumerate(label_vals_flattened) if lab == label]

            if len(indices) < TRAM_MIN_TIME_POINTS: # not enough data points
                tram = None
            else:
                tram = MeasureTrackQuality.compute_TrAM(tram_feature_names, normalized_all_data_array,
                                                        image_vals_flattened, indices, self.num_knots.get_value(),
                                                        self.tram_exponent.get_value(), isotropic_pairs)

            obj_nums = {image_vals_flattened[i] : object_nums_flattened[i] for i in indices} # pairs of image and object
            tram_dict.update({next_available_tram_label : {self.TRAM_KEY : tram, self.OBJECT_NUMS_KEY : obj_nums, self.SPLIT_KEY : 0}})
            next_available_tram_label += 1

        # now compute TrAM for split trajectories
        tracking_info_dict = dict()
        tracking_info_dict[self.LABELS_KEY] = label_vals_flattened
        tracking_info_dict[self.IMAGE_NUMS_KEY] = image_vals_flattened
        tracking_info_dict[self.OBJECT_NUMS_KEY] = object_nums_flattened

        parent_object_text_start = "{}_{}".format(trackobjects.F_PREFIX, trackobjects.F_PARENT_OBJECT_NUMBER)
        parent_object_feature = next(feature_name for feature_name in feature_names
                                     if feature_name.startswith(parent_object_text_start))
        tracking_info_dict[self.PARENT_OBJECT_NUMS_KEY] = \
            extract_flattened_measurements_for_valid_labels(measurements.get_measurement(obj_name,
                                                                                         parent_object_feature,
                                                                                         img_numbers))

        split_trajectories_tram_dict = \
            self.evaluate_tram_for_split_objects(labels_for_split_trajectories, tram_feature_names,
                                                 isotropic_pairs, normalized_all_data_array,
                                                 tracking_info_dict, next_available_tram_label)
        tram_dict.update(split_trajectories_tram_dict) # store them with the others

        def get_element_or_default_for_None(x, index, default):
            if x is None:
                return default
            else:
                return x[index]

        results_to_store_by_img = {img_num: [None for _ in range(object_count_by_image[img_num])]
                                   for img_num in img_numbers} # Seems excessive. there must be a better way.

        # cycle through each tram computed
        for tram_label, traj_dict in tram_dict.items():
            tram = traj_dict[self.TRAM_KEY]
            split_flag = traj_dict[self.SPLIT_KEY]
            for img_num, object_num in traj_dict[self.OBJECT_NUMS_KEY].items(): # every object across images for this tram
                index = index_by_img_and_object[(img_num, object_num)]
                result_dict = results_to_store_by_img[img_num][index]

                if result_dict is None:
                    result_dict = dict()  # first trajectory to claim this object
                    results_to_store_by_img[img_num][index] = result_dict
                    result_dict[self.PARENT_KEY] = 0
                    result_dict[self.TRAM_KEY] = tram
                    result_dict[self.LABELS_KEY] = [tram_label]
                else:  # already claimed: this object is a parent and has no valid TrAM of its own
                    result_dict[self.PARENT_KEY] = 1
                    result_dict[self.TRAM_KEY] = None
                    result_dict[self.LABELS_KEY].append(tram_label)

                result_dict.update({self.SPLIT_KEY: split_flag})

        # Loop over all images and save out
        tram_values_to_save = list()
        parent_values_to_save = list()
        split_values_to_save = list()
        label_values_to_save = list()

        for img_num, vec in results_to_store_by_img.items():
            tram_values_to_save.append([get_element_or_default_for_None(v, self.TRAM_KEY, None) for v in vec])
            parent_values_to_save.append([get_element_or_default_for_None(v, self.PARENT_KEY, None) for v in vec])
            split_values_to_save.append([get_element_or_default_for_None(v, self.SPLIT_KEY, None) for v in vec])
            label_values_to_save.append([get_element_or_default_for_None(v, self.LABELS_KEY, None) for v in vec])

        img_nums = list(results_to_store_by_img.keys())
        workspace.measurements.add_measurement(obj_name, self.FULL_TRAM_MEAS_NAME, tram_values_to_save, image_set_number=img_nums)
        workspace.measurements.add_measurement(obj_name, self.FULL_PARENT_MEAS_NAME, parent_values_to_save, image_set_number=img_nums)
        workspace.measurements.add_measurement(obj_name, self.FULL_SPLIT_MEAS_NAME, split_values_to_save, image_set_number=img_nums)
        workspace.measurements.add_measurement(obj_name, self.FULL_LABELS_MEAS_NAME, label_values_to_save, image_set_number=img_nums)

        # store the existing TrAM values for the histogram display
        workspace.display_data.tram_values = [d.get(self.TRAM_KEY)
                                              for d in tram_dict.values() if d.get(self.TRAM_KEY) is not None]

    @staticmethod
    def compute_TrAM(tram_feature_names, normalized_data_array, image_vals_flattened, indices,
                     num_knots, tram_exponent, isotropic_pairs):
        """
        Compute the TrAM statistic for a single trajectory
        
        :param tram_feature_names: Names of the features to use (in order of the columns in normalized_data_array) 
        :param normalized_data_array: Source of data (normalized to typical absolute deviations). Columns correspond
        to TrAM features, and rows are for all objects across images
        :param image_vals_flattened: The image numbers corresponding to rows in normalized_data_array
        :param indices: The rows in normalized_data_array which are for this trajectory
        :param num_knots: Number of knots in the smoothing spline 
        :param tram_exponent: TrAM exponent used to combine aberrations
        :param isotropic_pairs: List of X-Y feature pairs which should be combined with a Euclidean metric
        :return: The computed TrAM value
        """
        normalized_data_for_label = normalized_data_array[indices,:]  # get the corresponding data
        images = [image_vals_flattened[i] for i in indices]

        normalized_data_for_label = normalized_data_for_label[numpy.argsort(images), :]  # order rows by image number
        normalized_values_dict = {name: normalized_data_for_label[:, i]
                                  for i, name in enumerate(tram_feature_names)}

        def compute_single_aberration(normalized_values):
            """
            Figure out the deviation from smooth at each time point
            :param normalized_values: time series of values, normalized to the typical deviation
            :return: list of absolute deviation values from smooth
            """
            n = len(normalized_values)
            xs = numpy.arange(1, n + 1, dtype=float)
            knot_delta = (n - 1.0) / (num_knots + 1.0)
            knot_locs = 1 + numpy.arange(1, num_knots + 1) * knot_delta  # num_knots evenly spaced interior knots

            try:
                interp_func = scipy.interpolate.LSQUnivariateSpline(xs, normalized_values, knot_locs)
                smoothed_vals = interp_func(xs)
            except ValueError:
                smoothed_vals = numpy.full(len(xs), numpy.nan)  # spline fit failed; return NaNs

            return abs(normalized_values - smoothed_vals)
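        # Sanity check (hypothetical values): a cubic least-squares spline reproduces a
        # noiseless linear series exactly, so compute_single_aberration applied to
        # numpy.linspace(0.0, 1.0, 20) returns values that are ~0 everywhere; erratic
        # tracks yield correspondingly larger aberrations.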

        # compute aberrations for each of the features
        aberration_dict = {feat_name : compute_single_aberration(numpy.array(values))
                           for feat_name, values in normalized_values_dict.items()}

        # now combine them with the appropriate power
        aberration_array = numpy.column_stack(aberration_dict.values())

        # handle Euclidean weightings
        num_isotropic = len(isotropic_pairs)
        if num_isotropic != 0:
            column_names = list(aberration_dict.keys())
            remaining_features = list(column_names)

            column_list = list() # we will accumulate data here
            weight_list = list() # will accumulate weights here

            for x, y in isotropic_pairs:
                # find the data columns for the X and Y features
                x_col = column_names.index(x)
                y_col = column_names.index(y)

                isotropic_vec = numpy.sqrt(aberration_array[:, (x_col, y_col)].mean(axis=1))
                column_list.append(isotropic_vec)
                weight_list.append(2)  # this column combines two features, so it carries double weight

                # remove the column names from remaining features
                remaining_features.remove(x)
                remaining_features.remove(y)

            # all remaining features have weight 1
            for feature_name in remaining_features:
                col = column_names.index(feature_name)
                column_list.append(aberration_array[:, col])
                weight_list.append(1)

            data_array = numpy.column_stack(column_list) # make array
            weight_array = numpy.array(weight_list, float)
            weight_array = weight_array / numpy.sum(weight_array) # normalize weights
            weight_matrix = numpy.diag(weight_array)

            pwr = numpy.power(data_array, tram_exponent)
            weighted_means = numpy.matmul(pwr, weight_matrix).sum(axis=1)
            tram = numpy.max(numpy.power(weighted_means, 1.0 / tram_exponent))
        else:
            pwr = numpy.power(aberration_array, tram_exponent)
            means = pwr.mean(axis=1)
            tram = numpy.max(numpy.power(means, 1.0 / tram_exponent))
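        # In both branches, TrAM = max over time points of (weighted p-mean of the
        # per-feature aberrations) ** (1/p), with p = tram_exponent. E.g., with two
        # equally weighted features, p = 0.5 and aberrations (4, 1) at the worst
        # time point: ((4**0.5 + 1**0.5) / 2) ** 2 = 2.25.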

        return tram

    def evaluate_tram_for_split_objects(self, labels_for_split_trajectories, tram_feature_names, isotropic_pairs,
                                        normalized_data_array, tracking_info_dict, next_available_tram_label):
        """
        Compute TrAM results for objects that have split trajectories        
        :param labels_for_split_trajectories: TrackObjects labels for trajectories that split.
        :param tram_feature_names:  The feature names that are used to compute TrAM.
        :param isotropic_pairs: List of feature pairs (XY) to be Euclidianized.
        :param normalized_data_array: Data for the TrAM features, normalized by typical absolute deviation.
        :param tracking_info_dict: Dictionary of other relevant information about the objects.
        :param next_available_tram_label: Tram label number. We increment this as we use it.
        :return: Dictionary whose keys are TrAM labels and values are dictionaries containing values
        for the keys TRAM_KEY, OBJECT_NUMS_KEY, SPLIT_KEY
        """

        label_vals_flattened = tracking_info_dict[self.LABELS_KEY]
        image_vals_flattened = tracking_info_dict[self.IMAGE_NUMS_KEY]
        object_nums_flattened = tracking_info_dict[self.OBJECT_NUMS_KEY]
        parent_object_nums_flattened = tracking_info_dict[self.PARENT_OBJECT_NUMS_KEY]

        first_image_num = min(image_vals_flattened)
        last_image_num = max(image_vals_flattened)

        # Make a map from (image,object_number) to flattened array index so we can find parents
        img_obj_to_index = {(image_vals_flattened[i], object_nums_flattened[i]): i
                            for i in range(len(image_vals_flattened))}

        # Make a map from label to object number(s) for the last image. We will work backward from these
        object_nums_for_label_last_image = defaultdict(list) # need to store lists because there can be multiple
        # Restrict to labels for split trajectories and only last image
        for label, object_num, image_num in zip(label_vals_flattened, object_nums_flattened, image_vals_flattened):
            if image_num == last_image_num and label in labels_for_split_trajectories:
                object_nums_for_label_last_image[label].append(object_num)

        # Compute TrAM for each label of split objects. Each has a complete chain of
        # predecessor objects running from the last frame back to the first, because
        # these labels were filtered to have a maximum lifetime equal to the number
        # of frames. Here we piece together each object's entire trajectory and
        # compute TrAM on it. The trajectory is built as array indices, appended to
        # caller-supplied accumulator lists (which should start out empty).
        def get_parent_indices(image_num, object_num, index_accum, object_num_accum):
            if image_num < first_image_num: return

            index = img_obj_to_index[(image_num, object_num)]
            parent_object_num = parent_object_nums_flattened[index]
            get_parent_indices(image_num - 1, parent_object_num, index_accum, object_num_accum) # recurse for all earlier

            index_accum.append(index)
            object_num_accum.append(object_num)

        # cycle through every entry in our dict, compute TrAM, and store the result
        result = dict()
        for label, object_nums_last_image in object_nums_for_label_last_image.items():
            for object_num_last_image in object_nums_last_image:  # may hold several split branches
                indices_list = list()
                object_nums_list = list()
                get_parent_indices(last_image_num, object_num_last_image, indices_list, object_nums_list)

                # indices_list now holds the row indices for this tracked object across images
                tram = MeasureTrackQuality.compute_TrAM(tram_feature_names, normalized_data_array, image_vals_flattened,
                                                        indices_list, self.num_knots.get_value(),
                                                        self.tram_exponent.get_value(), isotropic_pairs)

                # for each image number, the corresponding object number
                obj_nums = dict(zip([image_vals_flattened[i] for i in indices_list], object_nums_list))

                result[next_available_tram_label] = {self.TRAM_KEY: tram, self.OBJECT_NUMS_KEY: obj_nums,
                                                     self.SPLIT_KEY: 1}
                next_available_tram_label += 1

        return result

    @staticmethod
    def compute_typical_deviations(values_dict, labels_vec, image_vec):
        """
        Compute the median absolute temporal difference in each of the features across all tracks
        
        :param values_dict: keys are feature names, values are lists of data values across images and tracks
        :param labels_vec: A list of track labels corresponding to data values in their arrays
        :param image_vec: A list of image numbers corresponding to data values in their arrays
        :return: dictionary whose keys are feature names and values are median absolute differences
        """
        # input is a list of time series lists
        def compute_median_abs_deviation(values_lists):
            return numpy.median(numpy.concatenate([numpy.abs(numpy.diff(vals)) for vals in values_lists]).ravel())
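        # e.g. compute_median_abs_deviation([[1, 2, 4], [0, 0, 3]]): the per-track
        # absolute diffs are [1, 2] and [0, 3], so the result is median([1, 2, 0, 3]) = 1.5.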

        # mapping from label to the indices of its data points
        labels_dict = defaultdict(list)
        for i, label in enumerate(labels_vec):
            labels_dict[label].append(i)

        result = dict()
        # for each feature get the deltas in time
        for feat_name, values in values_dict.items():
            all_diffs = list()
            for label, indices in labels_dict.items():
                data = [values[i] for i in indices]
                images = [image_vec[i] for i in indices]
                z = sorted(zip(images, data))  # put them in time order
                ordered_data = [d for _, d in z]
                all_diffs.append(ordered_data)
            mad = compute_median_abs_deviation(all_diffs)
            result[feat_name] = mad

        return result

    @staticmethod
    def Determine_Isotropic_pairs(features):
        """
        Look for any pairs that end in "_X" and "_Y" or have "_X_" and "_Y_" within them
        :param features:list of names 
        :return: list of tubples containing pairs of names which can be paired using an isotropic (Euclidian) metric
        """

        # first find all the features ending in "_X" or containing "_X_"
        features_X_1 = [feature for feature in features if re.search("_X$", feature)]
        features_X_2 = [feature for feature in features if re.search("_X_", feature)]

        # get corresponding pairs
        paired_1 = [(feature, re.sub("_X$", "_Y", feature)) for feature in features_X_1]
        paired_2 = [(feature, re.sub("_X_", "_Y_", feature)) for feature in features_X_2]

        pairs = paired_1 + paired_2

        # only return pairs where the Y feature exists
        return [(x, y) for x, y in pairs if y in features]

    # Get the selected measurements, restricted to those which start with the object name
    def get_selected_tram_measurements(self):
        # get what was selected by the user
        selections = self.tram_measurements.get_selections()

        # get the object set to work on
        object_name = self.object_name.value

        return [sel for sel in selections if sel.startswith(object_name)]

    def get_measurement_columns(self, pipeline):
        return [(self.object_name.value, self.FULL_TRAM_MEAS_NAME, COLTYPE_FLOAT),
                (self.object_name.value, self.FULL_PARENT_MEAS_NAME, COLTYPE_FLOAT),
                (self.object_name.value, self.FULL_SPLIT_MEAS_NAME, COLTYPE_FLOAT),
                (self.object_name.value, self.FULL_LABELS_MEAS_NAME, COLTYPE_BLOB)]

    def get_categories(self, pipeline, object_name):
        if object_name == self.object_name.value:
            return [self.CAT_MEASURE_TRACK_QUALITY]
        return []

    def get_measurements(self, pipeline, object_name, category):
        if object_name == self.object_name.value and category == self.CAT_MEASURE_TRACK_QUALITY:
            return [self.MEAS_TRAM, self.MEAS_PARENT, self.MEAS_SPLIT, self.MEAS_LABELS]
        return []

    def is_aggregation_module(self):
        """If true, the module uses data from other imagesets in a group

        Aggregation modules perform operations that require access to
        all image sets in a group, generally resulting in an aggregation
        operation during the last image set or in post_group. Examples are
        TrackObjects, MakeProjection and CorrectIllumination_Calculate.
        """
        return True
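
Before the next example, here is a self-contained sketch of the TrAM computation described above (illustrative only, not the module's code; it assumes a single time-ordered track whose features are already normalized by their typical absolute deviations):

import numpy
import scipy.interpolate

def tram_for_track(feature_series, num_knots=4, p=0.5):
    # feature_series: dict of feature name -> 1-D array for one track, time-ordered
    # and already normalized by each feature's typical absolute deviation.
    aberrations = []
    for values in feature_series.values():
        n = len(values)
        xs = numpy.arange(1, n + 1, dtype=float)
        knot_delta = (n - 1.0) / (num_knots + 1.0)
        knots = 1 + numpy.arange(1, num_knots + 1) * knot_delta  # interior knots
        smooth = scipy.interpolate.LSQUnivariateSpline(xs, values, knots)(xs)
        aberrations.append(numpy.abs(values - smooth))  # deviation from smooth
    ab = numpy.column_stack(aberrations)
    # p-mean across features at each time point, then take the worst time point
    return numpy.max(numpy.mean(ab ** p, axis=1) ** (1.0 / p))

rng = numpy.random.default_rng(0)
t = numpy.linspace(0.0, 1.0, 30)
track = {"X": t + 0.05 * rng.standard_normal(30),
         "Y": 1.0 - t + 0.05 * rng.standard_normal(30)}
print(tram_for_track(track))  # small values indicate a smooth, well-tracked trajectory

Smooth tracks give values near zero; erratic tracking inflates the maximum, which is what the module's quality measurement captures.
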
Example #26
    def create_settings(self):
        self.images_list = ImageListSubscriber(
            "Select images to measure",
            [],
            doc=
            """Select the grayscale images whose intensity you want to measure.""",
        )

        self.objects_list = LabelListSubscriber(
            "Select objects to measure",
            [],
            doc="""\
        Select the objects whose texture you want to measure. If you only want
        to measure the texture for the image overall, you can remove all objects
        using the “Remove this object” button.

        Objects specified here will have their texture measured against *all*
        images specified above, which may lead to image-object combinations that
        are unnecessary. If you do not want this behavior, use multiple
        **MeasureTexture** modules to specify the particular image-object
        measures that you want.
        """,
        )

        self.gray_levels = Integer(
            "Enter how many gray levels to measure the texture at",
            256,
            2,
            256,
            doc="""\
        Enter the number of gray levels (i.e., the total possible intensity values)
        you want to measure texture at. Measuring at more levels gives you
        *potentially* more detailed information about your image, but at the cost
        of somewhat decreased processing speed.

        Before processing, your image will be rescaled from its current pixel values
        to the range 0 to [gray levels - 1]. The texture features will then be calculated.

        In all CellProfiler 2 versions, this value was fixed at 8; in all 
        CellProfiler 3 versions it was fixed at 256.  The minimum number of levels is
        2, the maximum is 256.
        """,
        )

        self.scale_groups = []

        self.scale_count = HiddenCount(self.scale_groups)

        self.image_divider = Divider()

        self.object_divider = Divider()

        self.add_scale(removable=False)

        self.add_scales = DoSomething(
            callback=self.add_scale,
            label="Add another scale",
            text="",
            doc="""\
            Add an additional texture scale to measure. Useful when you
            want to measure texture features of different sizes.
            """,
        )

        self.images_or_objects = Choice(
            "Measure whole images or objects?",
            [IO_IMAGES, IO_OBJECTS, IO_BOTH],
            value=IO_BOTH,
            doc="""\
This setting determines whether the module computes image-wide
measurements, per-object measurements or both.

-  *{IO_IMAGES}:* Select if you only want to measure the texture
   across entire images.
-  *{IO_OBJECTS}:* Select if you want to measure the texture
   on a per-object basis only.
-  *{IO_BOTH}:* Select to make both image and object measurements.
""".format(
                **{
                    "IO_IMAGES": IO_IMAGES,
                    "IO_OBJECTS": IO_OBJECTS,
                    "IO_BOTH": IO_BOTH
                }),
        )
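
A minimal sketch of the rescaling described in the gray-levels setting above (assumes a float image in [0, 1]; the module's own implementation may differ):

import numpy

def quantize(image, gray_levels=256):
    # Rescale a float image in [0, 1] to integer levels 0 .. gray_levels - 1.
    scaled = numpy.clip(image, 0.0, 1.0) * (gray_levels - 1)
    return scaled.round().astype(numpy.uint16)

img = numpy.random.default_rng(1).random((4, 4))
print(quantize(img, gray_levels=8))  # values now in 0..7
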
    def create_settings(self):
        super(RunStarDist, self).create_settings()

        self.model = Choice(
            text="Model",
            choices=MODEL_OPTIONS,
            value=GREY_1,
            doc="""\
StarDist comes with models for detecting nuclei. Alternatively, you can supply a custom model trained outside of 
CellProfiler in Python. Custom models can be useful when working with unusual cell types.

The inbuilt fluorescent and DSB models expect greyscale images. The H&E model expects a color brightfield image as 
input. Custom models will require images of the type they were trained with. Note that the models supplied with 
StarDist do not support 3D images, but it's possible to train and use your own.
""",
        )

        self.tile_image = Binary(
            text="Tile input image?",
            value=False,
            doc="""\
If enabled, the input image will be broken down into overlapping tiles. 
This can help to conserve memory when working with large images.

The image is split into a set number of vertical and horizontal tiles. 
The total number of tiles will be the result of multiplying the horizontal 
and vertical tile number.""",
        )

        self.n_tiles_x = Integer(text="Horizontal tiles",
                                 value=1,
                                 minval=1,
                                 doc="""\
Specify the number of tiles to break the image down into along the x-axis (horizontal)."""
                                 )

        self.n_tiles_y = Integer(text="Vertical tiles",
                                 value=1,
                                 minval=1,
                                 doc="""\
Specify the number of tiles to break the image down into along the y-axis (vertical)."""
                                 )

        self.save_probabilities = Binary(
            text="Save probability image?",
            value=False,
            doc="""
If enabled, the probability scores from the model will be recorded as a new image. 
Probability scales from 0-1, with 1 representing absolute certainty of a pixel being in a cell. 
You may want to use a custom threshold to manually generate objects.""",
        )

        self.probabilities_name = ImageName(
            "Name the probability image",
            "Probabilities",
            doc=
            "Enter the name you want to call the probability image produced by this module.",
        )

        self.model_directory = Directory("Model folder",
                                         doc=f"""\
*(Used only when using a custom pre-trained model)*

Select the folder containing your StarDist model. This should have the config, threshold and weights files 
exported after training.""")

        self.gpu_test = DoSomething(
            "",
            "Test GPU",
            self.do_check_gpu,
            doc=f"""\
Press this button to check whether a GPU is correctly configured.

If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the 
required dependencies are not installed. 
Make sure you followed the setup instructions here: https://www.tensorflow.org/install/gpu

If you don't have a GPU or it's not configured, StarDist will instead run on the CPU. 
This will be slower but should work on any system.
""",
        )
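
The two tile settings above define an (n_tiles_y x n_tiles_x) grid; a rough sketch of such a split (no overlap here, whereas StarDist adds overlap internally):

import numpy

def tile_grid(image, n_tiles_x, n_tiles_y):
    # Split a 2-D array into n_tiles_y * n_tiles_x roughly equal tiles.
    return [tile
            for row in numpy.array_split(image, n_tiles_y, axis=0)
            for tile in numpy.array_split(row, n_tiles_x, axis=1)]

tiles = tile_grid(numpy.zeros((100, 120)), n_tiles_x=3, n_tiles_y=2)
print(len(tiles), tiles[0].shape)  # 6 tiles of shape (50, 40)
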
    def create_settings(self):
        super(RunCellpose, self).create_settings()

        self.expected_diameter = Integer(
            text="Expected object diameter",
            value=15,
            minval=0,
            doc="""\
The average diameter of the objects to be detected. Setting this to 0 will attempt to automatically detect object size.
Note that automatic diameter mode does not work when running on 3D images.

Cellpose models come with a pre-defined object diameter. Your image will be resized during detection to attempt to 
match the diameter expected by the model. The default models have an expected diameter of ~16 pixels; if you are 
trying to detect much smaller objects, it may be more efficient to resize the image first using the Resize module.
""",
        )

        self.mode = Choice(
            text="Detection mode",
            choices=[MODE_NUCLEI, MODE_CELLS, MODE_CUSTOM],
            value=MODE_NUCLEI,
            doc="""\
CellPose comes with models for detecting nuclei or cells. Alternatively, you can supply a custom-trained model 
generated using the command line or Cellpose GUI. Custom models can be useful if working with unusual cell types.
""",
        )

        self.use_gpu = Binary(text="Use GPU",
                              value=False,
                              doc=f"""\
If enabled, Cellpose will attempt to run detection on your system's graphics card (GPU). 
Note that you will need a CUDA-compatible GPU and correctly configured PyTorch version, see this link for details: 
{CUDA_LINK}

If disabled or incorrectly configured, Cellpose will run on your CPU instead. This is much slower but more compatible 
with different hardware setups.

Note that, particularly when in 3D mode, lack of GPU memory can become a limitation. If a model crashes you may need to 
re-start CellProfiler to release GPU memory. Resizing large images prior to running them through the model can free up 
GPU memory.

""")

        self.use_averaging = Binary(text="Use averaging",
                                    value=True,
                                    doc="""\
If enabled, CellPose will run its 4 inbuilt models and take a consensus to determine the results. If disabled, only a 
single model will be called to produce results. Disabling averaging is faster to run but less accurate."""
                                    )

        self.supply_nuclei = Binary(text="Supply nuclei image as well?",
                                    value=False,
                                    doc="""
When detecting whole cells, you can provide a second image featuring a nuclear stain to assist 
the model with segmentation. This can help to split touching cells.""")

        self.nuclei_image = ImageSubscriber(
            "Select the nuclei image",
            doc="Select the image you want to use as the nuclear stain.")

        self.save_probabilities = Binary(
            text="Save probability image?",
            value=False,
            doc="""
If enabled, the probability scores from the model will be recorded as a new image. 
Probability >0 is considered as being part of a cell. 
You may want to use a higher threshold to manually generate objects.""",
        )

        self.probabilities_name = ImageName(
            "Name the probability image",
            "Probabilities",
            doc=
            "Enter the name you want to call the probability image produced by this module.",
        )

        self.model_directory = Directory(
            "Location of the pre-trained model file",
            doc=f"""\
*(Used only when using a custom pre-trained model)*

Select the location of the pre-trained CellPose model file that will be used for detection."""
        )

        def get_directory_fn():
            """Get the directory for the rules file name"""
            return self.model_directory.get_absolute_path()

        def set_directory_fn(path):
            dir_choice, custom_path = self.model_directory.get_parts_from_path(
                path)

            self.model_directory.join_parts(dir_choice, custom_path)

        self.model_file_name = Filename("Pre-trained model file name",
                                        "cyto_0",
                                        get_directory_fn=get_directory_fn,
                                        set_directory_fn=set_directory_fn,
                                        doc=f"""\
*(Used only when using a custom pre-trained model)*

This file can be generated by training a custom model within the CellPose GUI or command line applications."""
                                        )

        self.gpu_test = DoSomething(
            "",
            "Test GPU",
            self.do_check_gpu,
            doc=f"""\
Press this button to check whether a GPU is correctly configured.

If you have a dedicated GPU, a failed test usually means that either your GPU does not support deep learning or the 
required dependencies are not installed.

If you have multiple GPUs on your system, this button will only test the first one.
""",
        )

        self.flow_threshold = Float(
            text="Flow threshold",
            value=0.4,
            minval=0,
            doc=
            """Flow error threshold. All cells with errors below this threshold are kept. Recommended default is 0.4""",
        )

        self.dist_threshold = Float(
            text="Cell probability threshold",
            value=0.0,
            minval=0,
            doc=f"""\
Cell probability threshold (all pixels with probability above threshold kept for masks). Recommended default is 0.0. """,
        )
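
As the probability-image doc above suggests, the saved map can be thresholded downstream to regenerate masks; a hypothetical example (stand-in array, not the Cellpose API):

import numpy

prob = numpy.random.default_rng(2).random((64, 64)) * 2.0 - 1.0  # stand-in probability map
mask = prob > 0.5  # stricter than the default "> 0 is part of a cell" rule described above
print(mask.sum(), "of", mask.size, "pixels kept")
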
Example #29
    def create_settings(self):
        self.object_name = LabelSubscriber(
            "Select the input objects",
            "None",
            doc="Select the objects that you want to expand or shrink.",
        )

        self.output_object_name = LabelName(
            "Name the output objects",
            "ShrunkenNuclei",
            doc="Enter a name for the resulting objects.",
        )

        self.operation = Choice(
            "Select the operation",
            O_ALL,
            doc="""\
Choose the operation that you want to perform:

-  *{O_SHRINK_INF}:* Remove all pixels but one from filled objects.
   Thin objects with holes to loops unless the “fill” option is checked.
   Objects are never lost using this module (shrinking stops when an
   object becomes a single pixel).
-  *{O_EXPAND_INF}:* Expand objects, assigning every pixel in the
   image to an object. Background pixels are assigned to the nearest
   object.
-  *{O_DIVIDE}:* Remove pixels from an object that are adjacent to
   another object’s pixels unless doing so would change the object’s
   Euler number (break an object in two, remove the object completely or
   open a hole in an object).
-  *{O_SHRINK}:* Remove pixels around the perimeter of an object unless
   doing so would change the object’s Euler number (break the object in
   two, remove the object completely or open a hole in the object). You
   can specify the number of times perimeter pixels should be removed.
   Processing stops automatically when there are no more pixels to
   remove. Objects are never lost using this module (shrinking
   stops when an object becomes a single pixel).
-  *{O_EXPAND}:* Expand each object by adding background pixels
   adjacent to the object. You can choose the number of times to expand.
   Processing stops automatically if there are no more background
   pixels.
-  *{O_SKELETONIZE}:* Erode each object to its skeleton.
-  *{O_SPUR}:* Remove or reduce the length of spurs in a skeletonized
   image. The algorithm reduces spur size by the number of pixels
   indicated in the setting *Number of pixels by which to expand or
   shrink*.
""".format(
                **{
                    "O_DIVIDE": O_DIVIDE,
                    "O_EXPAND": O_EXPAND,
                    "O_EXPAND_INF": O_EXPAND_INF,
                    "O_SHRINK": O_SHRINK,
                    "O_SHRINK_INF": O_SHRINK_INF,
                    "O_SKELETONIZE": O_SKELETONIZE,
                    "O_SPUR": O_SPUR,
                }),
        )

        self.iterations = Integer(
            "Number of pixels by which to expand or shrink",
            1,
            minval=1,
            doc="""\
*(Used only if "{O_SHRINK}", "{O_EXPAND}", or "{O_SPUR}" is selected)*

Specify the number of pixels to add or remove from object borders.
""".format(**{
                "O_EXPAND": O_EXPAND,
                "O_SHRINK": O_SHRINK,
                "O_SPUR": O_SPUR
            }),
        )

        self.wants_fill_holes = Binary(
            "Fill holes in objects so that all objects shrink to a single point?",
            False,
            doc="""\
*(Used only if one of the “Shrink” options is selected)*

Select *{YES}* to ensure that each object will shrink to a single
point, by filling the holes in each object.

Select *{NO}* to preserve the Euler number. In this case, the shrink
algorithm preserves each object’s Euler number, which means that it will
erode an object with a hole to a ring in order to keep the hole. An
object with two holes will be shrunk to two rings connected by a line in
order to keep from breaking up the object or breaking the hole.
""".format(**{
                "NO": "No",
                "YES": "Yes"
            }),
        )
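
The expand operation described above (grow labels into nearby background without merging objects) can be sketched with scipy's distance transform; an illustration, not the module's implementation:

import numpy
from scipy import ndimage

def expand_labels_by(labels, iterations=1):
    # Assign each background pixel within `iterations` pixels of an object
    # the label of its nearest object; leave farther background as 0.
    distances, (ii, jj) = ndimage.distance_transform_edt(labels == 0, return_indices=True)
    expanded = labels[ii, jj]                # nearest object's label everywhere
    expanded[distances > iterations] = 0     # cap the growth
    return expanded

labels = numpy.zeros((7, 7), int)
labels[1, 1] = 1
labels[5, 5] = 2
print(expand_labels_by(labels, iterations=2))
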
    def create_settings(self):
        self.ground_truth = ImageSubscriber(
            "Select the image to be used as the ground truth basis for calculating the amount of overlap",
            "None",
            doc="""\
This binary (black and white) image is known as the “ground truth”
image. It can be the product of segmentation performed by hand, or the
result of another segmentation algorithm whose results you would like to
compare.""",
        )

        self.test_img = ImageSubscriber(
            "Select the image to be used to test for overlap",
            "None",
            doc="""\
This binary (black and white) image is what you will compare with the
ground truth image. It is known as the “test image”.""",
        )

        self.wants_emd = Binary(
            "Calculate earth mover's distance?",
            False,
            doc="""\
The earth mover’s distance computes the shortest distance that would
have to be travelled to move each foreground pixel in the test image to
some foreground pixel in the reference image. “Earth mover’s” refers to
an analogy: the pixels are “earth” that has to be moved by some machine
at the smallest possible cost.
It would take too much memory and processing time to compute the exact
earth mover’s distance, so **MeasureImageOverlap** chooses
representative foreground pixels in each image and assigns each
foreground pixel to its closest representative. The earth mover’s
distance is then computed for moving the foreground pixels associated
with each representative in the test image to those in the reference
image.""",
        )

        self.max_points = Integer(
            "Maximum # of points",
            value=250,
            minval=100,
            doc="""\
*(Used only when computing the earth mover’s distance)*

This is the number of representative points that will be taken from the
foreground of the test image and from the foreground of the reference
image using the point selection method (see below).""",
        )

        self.decimation_method = Choice(
            "Point selection method",
            choices=[DM_KMEANS, DM_SKEL],
            doc="""\
*(Used only when computing the earth mover’s distance)*

The point selection setting determines how the representative points
are chosen.

-  *{DM_KMEANS}:* Select to pick representative points using a K-Means
   clustering technique. The foregrounds of both images are combined and
   representatives are picked that minimize the distance to the nearest
   representative. The same representatives are then used for the test
   and reference images.
-  *{DM_SKEL}:* Select to skeletonize the image and pick points
   equidistant along the skeleton.

|image0|  *{DM_KMEANS}* is a choice that’s generally applicable to all
images. *{DM_SKEL}* is best suited to long, skinny objects such as
worms or neurites.

.. |image0| image:: {PROTIP_RECOMMEND_ICON}
""".format(
                **{
                    "DM_KMEANS": DM_KMEANS,
                    "DM_SKEL": DM_SKEL,
                    "PROTIP_RECOMMEND_ICON": _help.PROTIP_RECOMMEND_ICON,
                }),
        )
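        # A stand-alone sketch of the K-means point selection described above
        # (illustrative, not this module's code): representatives can be drawn
        # from foreground pixel coordinates with scipy, e.g.
        #     from scipy.cluster.vq import kmeans2
        #     coords = numpy.argwhere(foreground_mask)          # (n, 2) pixel coords
        #     centers, assignment = kmeans2(coords.astype(float), 25, minit="points")
        # Each foreground pixel is then represented by its nearest center before
        # the earth mover's distance is solved between the two images.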

        self.max_distance = Integer(
            "Maximum distance",
            value=250,
            minval=1,
            doc="""\
*(Used only when computing the earth mover’s distance)*

This setting sets an upper bound to the distance penalty assessed during
the movement calculation. As an example, the score for moving 10 pixels
from one location to a location that is 100 pixels away is 10\*100, but
if the maximum distance were set to 50, the score would be 10\*50
instead.

The maximum distance should be set to the largest reasonable distance
that pixels could be expected to move from one image to the next.""",
        )

        self.penalize_missing = Binary(
            "Penalize missing pixels",
            value=False,
            doc="""\
*(Used only when computing the earth mover’s distance)*

If one image has more foreground pixels than the other, the earth
mover’s distance is not well-defined because there is no destination for
the extra source pixels or vice-versa. It’s reasonable to assess a
penalty for the discrepancy when comparing the accuracy of a
segmentation because the discrepancy represents an error. It’s also
reasonable to assess no penalty if the goal is to compute the cost of
movement, for example between two frames in a time-lapse movie, because
the discrepancy is likely caused by noise or artifacts in segmentation.
Set this setting to “Yes” to assess a penalty equal to the maximum
distance times the absolute difference in number of foreground pixels in
the two images. Set this setting to “No” to assess no penalty.""",
        )