Example #1
0
    def run(self, workspace):
        """Load the configured files for the current image set.

        For each file setting, loads the file through a
        LoadImagesImageProvider and either registers the result as an
        image (recording MD5 / scaling / height / width measurements) or
        converts it into labeled objects (recording object count and
        location measurements, plus optional outline images).
        """
        statistics = []
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        #
        # Hack: if LoadSingleImage is first, no paths are populated
        #
        if self.file_wants_images(self.file_settings[0]):
            m_path = "_".join(
                (C_PATH_NAME, self.file_settings[0].image_name.value))
        else:
            m_path = "_".join((C_OBJECTS_PATH_NAME,
                               self.file_settings[0].objects_name.value))
        if m.get_current_image_measurement(m_path) is None:
            # Path measurement is missing, so prepare_run never ran for
            # this module; call it now so the lookups below succeed.
            self.prepare_run(workspace)

        image_set = workspace.image_set
        for file_setting in self.file_settings:
            wants_images = self.file_wants_images(file_setting)
            image_name = file_setting.image_name.value if wants_images else \
                file_setting.objects_name.value
            # Build the per-image measurement feature names, using the
            # image or objects variants of the path/file categories.
            m_path, m_file, m_md5_digest, m_scaling, m_height, m_width = [
                "_".join((c, image_name))
                for c in (C_PATH_NAME if wants_images else C_OBJECTS_PATH_NAME,
                          C_FILE_NAME if wants_images else C_OBJECTS_FILE_NAME,
                          C_MD5_DIGEST, C_SCALING, C_HEIGHT, C_WIDTH)
            ]
            pathname = m.get_current_image_measurement(m_path)
            filename = m.get_current_image_measurement(m_file)
            # Rescaling only applies when loading as an image.
            rescale = (wants_images and file_setting.rescale.value)

            provider = LoadImagesImageProvider(image_name, pathname, filename,
                                               rescale)
            image = provider.provide_image(image_set)
            pixel_data = image.pixel_data
            if wants_images:
                md5 = provider.get_md5_hash(m)
                m.add_image_measurement("_".join((C_MD5_DIGEST, image_name)),
                                        md5)
                m.add_image_measurement("_".join((C_SCALING, image_name)),
                                        image.scale)
                m.add_image_measurement("_".join((C_HEIGHT, image_name)),
                                        int(pixel_data.shape[0]))
                m.add_image_measurement("_".join((C_WIDTH, image_name)),
                                        int(pixel_data.shape[1]))
                image_set.providers.append(provider)
            else:
                #
                # Turn image into objects
                #
                labels = convert_image_to_objects(pixel_data)
                objects = cpo.Objects()
                objects.segmented = labels
                object_set = workspace.object_set
                assert isinstance(object_set, cpo.ObjectSet)
                object_set.add_objects(objects, image_name)
                add_object_count_measurements(m, image_name, objects.count)
                add_object_location_measurements(m, image_name, labels)
                #
                # Add outlines if appropriate
                #
                if file_setting.wants_outlines:
                    outlines = centrosome.outline.outline(labels)
                    outline_image = cpi.Image(outlines.astype(bool))
                    workspace.image_set.add(file_setting.outlines_name.value,
                                            outline_image)
            statistics += [(image_name, filename)]
        workspace.display_data.col_labels = ("Image name", "File")
        workspace.display_data.statistics = statistics
 def test_01_02_get_image(self):
     """An image added to an ImageSet can be fetched back with its shape intact."""
     image_set = cpi.ImageSet(0, {}, {})
     image_set.add(IMAGE_NAME, cpi.Image(np.zeros((10, 20))))
     fetched = image_set.get_image(IMAGE_NAME)
     self.assertEqual(tuple(fetched.pixel_data.shape), (10, 20))
Example #3
0
    def run(self, workspace):
        """Split or unify the input objects and record the results.

        OPTION_SPLIT relabels 8-connected components of the input
        foreground; otherwise objects are unified either by distance
        (merging objects closer than half the distance threshold) or by
        a shared parent object (optionally filling each group's convex
        hull). Count, location and parent/child measurements are
        recorded for the output objects.
        """
        objects = workspace.object_set.get_objects(self.objects_name.value)
        assert isinstance(objects, cpo.Objects)
        labels = objects.segmented
        if self.relabel_option == OPTION_SPLIT:
            # 8-connected relabeling of the foreground.
            output_labels, count = scind.label(labels > 0, np.ones((3, 3),
                                                                   bool))
        else:
            if self.unify_option == UNIFY_DISTANCE:
                mask = labels > 0
                if self.distance_threshold.value > 0:
                    #
                    # Take the distance transform of the reverse of the mask
                    # and figure out what points are less than 1/2 of the
                    # distance from an object.
                    #
                    d = scind.distance_transform_edt(~mask)
                    mask = d < self.distance_threshold.value / 2 + 1
                output_labels, count = scind.label(mask, np.ones((3, 3), bool))
                # Restrict the merged labeling to the original foreground.
                output_labels[labels == 0] = 0
                if self.wants_image:
                    output_labels = self.filter_using_image(workspace, mask)
            elif self.unify_option == UNIFY_PARENT:
                # NOTE(review): output_labels is unbound if unify_option is
                # neither UNIFY_DISTANCE nor UNIFY_PARENT — presumably the
                # settings validation rules that out; confirm upstream.
                parent_objects = workspace.object_set.get_objects(
                    self.parent_object.value)
                parent_labels = parent_objects.segmented
                # Child pixels inherit their parent's label number.
                output_labels = parent_labels.copy()
                output_labels[labels == 0] = 0
                if self.unification_method == UM_CONVEX_HULL:
                    ch_pts, n_pts = morph.convex_hull(output_labels)
                    ijv = morph.fill_convex_hulls(ch_pts, n_pts)
                    # Only fill hull pixels lying inside the same parent.
                    include = parent_labels[ijv[:, 0], ijv[:, 1]] == ijv[:, 2]
                    output_labels[ijv[include, 0], ijv[include, 1]] = \
                        ijv[include, 2]

        output_objects = cpo.Objects()
        output_objects.segmented = output_labels
        # Carry the auxiliary segmentations over, renumbered to match
        # the new labeling.
        if objects.has_small_removed_segmented:
            output_objects.small_removed_segmented = \
                copy_labels(objects.small_removed_segmented, output_labels)
        if objects.has_unedited_segmented:
            output_objects.unedited_segmented = \
                copy_labels(objects.unedited_segmented, output_labels)
        output_objects.parent_image = objects.parent_image
        workspace.object_set.add_objects(output_objects,
                                         self.output_objects_name.value)

        measurements = workspace.measurements
        add_object_count_measurements(measurements,
                                      self.output_objects_name.value,
                                      np.max(output_objects.segmented))
        add_object_location_measurements(measurements,
                                         self.output_objects_name.value,
                                         output_objects.segmented)

        #
        # Relate the output objects to the input ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(output_objects)
        measurements.add_measurement(
            self.objects_name.value,
            FF_CHILDREN_COUNT % self.output_objects_name.value,
            children_per_parent)
        measurements.add_measurement(self.output_objects_name.value,
                                     FF_PARENT % self.objects_name.value,
                                     parents_of_children)
        if self.wants_outlines:
            outlines = centrosome.outline.outline(output_labels)
            outline_image = cpi.Image(outlines.astype(bool))
            workspace.image_set.add(self.outlines_name.value, outline_image)

        if self.show_window:
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.output_labels = output_objects.segmented
            if self.unify_option == UNIFY_PARENT:
                workspace.display_data.parent_labels = \
                    workspace.object_set.get_objects(self.parent_object.value).segmented
 def test_02_02_set_mask(self):
     """Assigning a mask array to an Image should be accepted."""
     img = cpi.Image()
     img.Mask = np.ones((10, 10))
 def test_05_01_mask_of3D(self):
     """The mask of a 3-d image should be 2-d"""
     img = cpi.Image()
     img.image = np.ones((10, 10, 3))
     self.assertTrue(img.mask.ndim == 2)
 def test_00_00_init(self):
     """Smoke test: constructing an Image with no arguments must not raise."""
     cpi.Image()
 def test_01_02_init_image_mask(self):
     """Construct an Image from pixel data plus an explicit boolean mask."""
     # np.bool was a deprecated alias of the builtin bool and was removed
     # in NumPy 1.24; use bool directly (identical dtype).
     x = cpi.Image(image=np.zeros((10, 10)),
                   mask=np.ones((10, 10), dtype=bool))
Example #8
0
 def run(self, workspace):
     """Render the selected objects' label matrix as an image.

     Supports binary, grayscale, color and uint16 modes; uint16 keeps
     the raw label numbers (convert=False). The result is added to the
     image set under the configured image name, and originals/results
     are displayed side by side when a GUI frame exists.

     Style fix only: `not x is None` rewritten as `x is not None`
     (PEP 8 E714) — semantics unchanged.
     """
     objects = workspace.object_set.get_objects(self.object_name.value)
     labels = objects.segmented
     convert = True
     if workspace.frame is not None:
         figure = workspace.create_or_find_figure(
             title="ConvertObjectsToImage, image cycle #%d" %
             (workspace.measurements.image_set_number),
             subplots=(2, 1))
         figure.subplot_imshow_labels(
             0, 0, labels, "Original: %s" % self.object_name.value)
     if self.image_mode == IM_BINARY:
         pixel_data = labels != 0
         if workspace.frame is not None:
             figure.subplot_imshow_bw(1,
                                      0,
                                      pixel_data,
                                      self.image_name.value,
                                      sharex=figure.subplot(0, 0),
                                      sharey=figure.subplot(0, 0))
     elif self.image_mode == IM_GRAYSCALE:
         # Scale labels into [0, 1]; guard against dividing by zero when
         # there are no objects.
         pixel_data = labels.astype(float) / (1.0 if np.max(labels) == 0
                                              else np.max(labels))
         if workspace.frame is not None:
             figure.subplot_imshow_grayscale(1,
                                             0,
                                             pixel_data,
                                             self.image_name.value,
                                             sharex=figure.subplot(0, 0),
                                             sharey=figure.subplot(0, 0))
     elif self.image_mode == IM_COLOR:
         import matplotlib.cm
         from cellprofiler.gui.cpfigure_tools import renumber_labels_for_display
         if self.colormap.value == DEFAULT_COLORMAP:
             cm_name = cpprefs.get_default_colormap()
         elif self.colormap.value == COLORCUBE:
             # Colorcube missing from matplotlib
             cm_name = "gist_rainbow"
         elif self.colormap.value == LINES:
             # Lines missing from matplotlib and not much like it,
             # Pretty boring palette anyway, hence
             cm_name = "Pastel1"
         elif self.colormap.value == WHITE:
             # White missing from matplotlib, it's just a colormap
             # of all completely white... not even different kinds of
             # white. And, isn't white just a uniform sampling of
             # frequencies from the spectrum?
             cm_name = "Spectral"
         else:
             cm_name = self.colormap.value
         cm = matplotlib.cm.get_cmap(cm_name)
         mapper = matplotlib.cm.ScalarMappable(cmap=cm)
         pixel_data = mapper.to_rgba(renumber_labels_for_display(labels))
         # Drop the alpha channel and black out the background pixels.
         pixel_data = pixel_data[:, :, :3]
         pixel_data[labels == 0, :] = 0
         if workspace.frame is not None:
             figure.subplot_imshow(1,
                                   0,
                                   pixel_data,
                                   self.image_name.value,
                                   sharex=figure.subplot(0, 0),
                                   sharey=figure.subplot(0, 0))
     elif self.image_mode == IM_UINT16:
         pixel_data = labels.copy()
         if workspace.frame is not None:
             figure.subplot_imshow_grayscale(1,
                                             0,
                                             pixel_data,
                                             self.image_name.value,
                                             sharex=figure.subplot(0, 0),
                                             sharey=figure.subplot(0, 0))
         convert = False
     image = cpi.Image(pixel_data,
                       parent_image=objects.parent_image,
                       convert=convert)
     workspace.image_set.add(self.image_name.value, image)
    def run(self, workspace):
        """Invert an image's colors for printing.

        Assembles an RGB image either from up to three grayscale inputs
        or from one color input, computes each inverted channel as the
        product of the other two channels' complements (1 - value), and
        emits either per-channel grayscale images or a single color
        image, per the output choice.
        """
        image_set = workspace.image_set
        shape = None
        if self.input_color_choice == CC_GRAYSCALE:
            # Channels not wanted default to the scalar 0; the
            # color_image assembly below broadcasts that to 2-d.
            if self.wants_red_input.value:
                red_image = image_set.get_image(
                    self.red_input_image.value,
                    must_be_grayscale=True).pixel_data
                shape = red_image.shape
            else:
                red_image = 0
            if self.wants_green_input.value:
                green_image = image_set.get_image(
                    self.green_input_image.value,
                    must_be_grayscale=True).pixel_data
                shape = green_image.shape
            else:
                green_image = 0
            if self.wants_blue_input.value:
                blue_image = image_set.get_image(
                    self.blue_input_image.value,
                    must_be_grayscale=True).pixel_data
                shape = blue_image.shape
            else:
                blue_image = 0
            # NOTE(review): if no input channel is wanted, shape stays
            # None and the next line raises — presumably the settings
            # validation requires at least one input; confirm upstream.
            color_image = np.zeros((shape[0],shape[1],3))
            color_image[:,:,0] = red_image
            color_image[:,:,1] = green_image
            color_image[:,:,2] = blue_image
            # Re-read the channels as 2-d slices so the arithmetic below
            # works uniformly even for channels that were the scalar 0.
            red_image = color_image[:,:,0]
            green_image = color_image[:,:,1]
            blue_image = color_image[:,:,2]
        elif self.input_color_choice == CC_COLOR:
            color_image = image_set.get_image(
                self.color_input_image.value,
                must_be_color=True).pixel_data
            red_image = color_image[:,:,0]
            green_image = color_image[:,:,1]
            blue_image = color_image[:,:,2]
        else:
            raise ValueError("Unimplemented color choice: %s" %
                             self.input_color_choice.value)
        # Subtractive inversion: each output channel combines the
        # complements of the other two input channels.
        inverted_red = (1 - green_image) * (1 - blue_image)
        inverted_green = (1 - red_image) * (1 - blue_image)
        inverted_blue = (1 - red_image) * (1 - green_image)
        inverted_color = np.dstack((inverted_red, inverted_green, inverted_blue))
        if self.output_color_choice == CC_GRAYSCALE:
            for wants_output, output_image_name, output_image in \
                ((self.wants_red_output, self.red_output_image, inverted_red),
                 (self.wants_green_output, self.green_output_image, inverted_green),
                 (self.wants_blue_output, self.blue_output_image, inverted_blue)):
                if wants_output.value:
                    image = cpi.Image(output_image)
                    image_set.add(output_image_name.value, image)
        elif self.output_color_choice == CC_COLOR:
            image = cpi.Image(inverted_color)
            image_set.add(self.color_output_image.value, image)
        else:
            raise ValueError("Unimplemented color choice: %s" %
                             self.output_color_choice.value)

        if self.show_window:
            workspace.display_data.color_image = color_image
            workspace.display_data.inverted_color = inverted_color
Example #10
0
    def run(self, workspace):
        """Mask an image using either objects or a binary masking image.

        Builds a boolean mask (optionally inverted), intersects it with
        the source image's own mask, zeroes the masked-out pixels, and
        adds the result to the image set under the masked image name.
        """
        image_set = workspace.image_set
        if self.source_choice == IO_OBJECTS:
            objects = workspace.get_objects(self.object_name.value)
            labels = objects.segmented
            if self.invert_mask.value:
                mask = labels == 0
            else:
                mask = labels > 0
        else:
            objects = None
            # Prefer a true binary image; fall back to thresholding a
            # grayscale image at 0.5.
            try:
                mask = image_set.get_image(self.masking_image_name.value,
                                           must_be_binary=True).pixel_data
            except ValueError:
                mask = image_set.get_image(self.masking_image_name.value,
                                           must_be_grayscale=True).pixel_data
                mask = mask > .5
            if self.invert_mask.value:
                mask = mask == 0
        orig_image = image_set.get_image(self.image_name.value)
        if tuple(mask.shape) != tuple(orig_image.pixel_data.shape[:2]):
            # NOTE(review): this pads the mask to the image size by
            # boolean-indexing an array of a different shape — presumably
            # the shapes only ever differ by cropping; confirm this path
            # still works on current NumPy.
            tmp = np.zeros(orig_image.pixel_data.shape[:2], mask.dtype)
            tmp[mask] = True
            mask = tmp
        if orig_image.has_mask:
            mask = np.logical_and(mask, orig_image.mask)
        masked_pixels = orig_image.pixel_data.copy()
        masked_pixels[np.logical_not(mask)] = 0
        masked_image = cpi.Image(masked_pixels,
                                 mask=mask,
                                 parent_image=orig_image,
                                 masking_objects=objects)

        if workspace.frame:
            figure = workspace.create_or_find_figure(
                title="MaskImage, image cycle #%d" %
                (workspace.measurements.image_set_number),
                subplots=(2, 1))
            if orig_image.pixel_data.ndim == 2:
                figure.subplot_imshow_grayscale(
                    0, 0, orig_image.pixel_data,
                    "Original image: %s" % (self.image_name.value))
                figure.subplot_imshow_grayscale(1,
                                                0,
                                                masked_pixels,
                                                "Masked image: %s" %
                                                (self.masked_image_name.value),
                                                sharex=figure.subplot(0, 0),
                                                sharey=figure.subplot(0, 0))
            else:
                figure.subplot_imshow_color(
                    0, 0, orig_image.pixel_data,
                    "Original image: %s" % (self.image_name.value))
                figure.subplot_imshow_color(1,
                                            0,
                                            masked_pixels,
                                            "Masked image: %s" %
                                            (self.masked_image_name.value),
                                            sharex=figure.subplot(0, 0),
                                            sharey=figure.subplot(0, 0))
        image_set.add(self.masked_image_name.value, masked_image)
    def run(self, workspace):
        """Run the module on the current data set

        workspace - has the current image set, object set, measurements
                    and the parent frame for the application if the module
                    is allowed to display. If the module should not display,
                    workspace.frame is None.

        Style fixes only: `!= None` rewritten as `is not None` (PEP 8
        E711) and boolean-array indexing with `== False` rewritten as
        `~mask` — semantics unchanged.
        """
        #
        # The object set holds "objects". Each of these is a container
        # for holding up to three kinds of image labels.
        #
        object_set = workspace.object_set
        #
        # Get the primary objects (the centers to be removed).
        # Get the string value out of primary_object_name.
        #
        primary_objects = object_set.get_objects(self.primary_objects_name.value)
        #
        # Get the cleaned-up labels image
        #
        primary_labels = primary_objects.segmented
        #
        # Do the same with the secondary object
        secondary_objects = object_set.get_objects(self.secondary_objects_name.value)
        secondary_labels = secondary_objects.segmented
        #
        # If one of the two label images is smaller than the other, we
        # try to find the cropping mask and we apply that mask to the larger
        #
        try:
            if any([p_size < s_size 
                    for p_size,s_size
                    in zip(primary_labels.shape, secondary_labels.shape)]):
                #
                # Look for a cropping mask associated with the primary_labels
                # and apply that mask to resize the secondary labels
                #
                secondary_labels = primary_objects.crop_image_similarly(secondary_labels)
                tertiary_image = primary_objects.parent_image
            elif any([p_size > s_size 
                    for p_size,s_size
                    in zip(primary_labels.shape, secondary_labels.shape)]):
                primary_labels = secondary_objects.crop_image_similarly(primary_labels)
                tertiary_image = secondary_objects.parent_image
            elif secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
        except ValueError:
            # No suitable cropping - resize all to fit the secondary
            # labels which are the most critical.
            #
            primary_labels, _ = cpo.size_similarly(secondary_labels, primary_labels)
            if secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
                if tertiary_image is not None:
                    tertiary_image, _ = cpo.size_similarly(secondary_labels, tertiary_image)
        #
        # Find the outlines of the primary image and use this to shrink the
        # primary image by one. This guarantees that there is something left
        # of the secondary image after subtraction
        #
        primary_outline = outline(primary_labels)
        tertiary_labels = secondary_labels.copy()
        if self.shrink_primary:
            primary_mask = np.logical_or(primary_labels == 0,
                                         primary_outline)
        else:
            primary_mask = primary_labels == 0
        # Zero out every pixel that is still inside a primary object.
        tertiary_labels[~primary_mask] = 0
        #
        # Get the outlines of the tertiary image
        #
        tertiary_outlines = outline(tertiary_labels)!=0
        #
        # Make the tertiary objects container
        #
        tertiary_objects = cpo.Objects()
        tertiary_objects.segmented = tertiary_labels
        tertiary_objects.parent_image = tertiary_image
        #
        # Relate tertiary objects to their parents & record
        #
        child_count_of_secondary, secondary_parents = \
            secondary_objects.relate_children(tertiary_objects)
        if self.shrink_primary:
            child_count_of_primary, primary_parents = \
                primary_objects.relate_children(tertiary_objects)
        else:
            # Primary and tertiary don't overlap.
            # Establish overlap between primary and secondary and commute
            _, secondary_of_primary = \
                secondary_objects.relate_children(primary_objects)
            mask = secondary_of_primary != 0
            child_count_of_primary = np.zeros(mask.shape, int)
            child_count_of_primary[mask] = child_count_of_secondary[
                secondary_of_primary[mask] - 1]
            primary_parents = np.zeros(secondary_parents.shape, 
                                       secondary_parents.dtype)
            primary_of_secondary = np.zeros(secondary_objects.count+1, int)
            primary_of_secondary[secondary_of_primary] = \
                np.arange(1, len(secondary_of_primary)+1)
            primary_of_secondary[0] = 0
            primary_parents = primary_of_secondary[secondary_parents]
        #
        # Write out the objects
        #
        workspace.object_set.add_objects(tertiary_objects,
                                         self.subregion_objects_name.value)
        #
        # Write out the measurements
        #
        m = workspace.measurements
        #
        # The parent/child associations
        #
        for parent_objects_name, parents_of, child_count, relationship in (
            (self.primary_objects_name, primary_parents, 
             child_count_of_primary, R_REMOVED),
            (self.secondary_objects_name, secondary_parents, 
             child_count_of_secondary, R_PARENT)):
            m.add_measurement(self.subregion_objects_name.value,
                              cpmi.FF_PARENT%(parent_objects_name.value),
                              parents_of)
            m.add_measurement(parent_objects_name.value,
                              cpmi.FF_CHILDREN_COUNT%(self.subregion_objects_name.value),
                              child_count)
            mask = parents_of != 0
            image_number = np.ones(np.sum(mask), int) * m.image_set_number
            child_object_number = np.argwhere(mask).flatten() + 1
            parent_object_number = parents_of[mask]
            m.add_relate_measurement(
                self.module_num, relationship,
                parent_objects_name.value, self.subregion_objects_name.value,
                image_number, parent_object_number,
                image_number, child_object_number)

        object_count = tertiary_objects.count
        #
        # The object count
        #
        cpmi.add_object_count_measurements(workspace.measurements,
                                           self.subregion_objects_name.value,
                                           object_count)
        #
        # The object locations
        #
        cpmi.add_object_location_measurements(workspace.measurements,
                                              self.subregion_objects_name.value,
                                              tertiary_labels)
        #
        # The outlines
        #
        if self.use_outlines.value:
            out_img = cpi.Image(tertiary_outlines.astype(bool),
                                parent_image = tertiary_image)
            workspace.image_set.add(self.outlines_name.value, out_img)

        if self.show_window:
            workspace.display_data.primary_labels = primary_labels
            workspace.display_data.secondary_labels = secondary_labels
            workspace.display_data.tertiary_labels = tertiary_labels
            workspace.display_data.tertiary_outlines = tertiary_outlines
Example #12
0
    def run(self, workspace):
        '''Filter objects for this image set, display results'''
        src_objects = workspace.get_objects(self.object_name.value)
        # Select the surviving object indexes according to the filter mode.
        if self.rules_or_measurement == ROM_RULES:
            indexes = self.keep_by_rules(workspace, src_objects)
        elif self.filter_choice in (FI_MINIMAL, FI_MAXIMAL):
            indexes = self.keep_one(workspace, src_objects)
        elif self.filter_choice in (FI_MINIMAL_PER_OBJECT,
                                    FI_MAXIMAL_PER_OBJECT):
            indexes = self.keep_per_object(workspace, src_objects)
        elif self.filter_choice == FI_LIMITS:
            indexes = self.keep_within_limits(workspace, src_objects)
        else:
            raise ValueError("Unknown filter choice: %s" %
                             self.filter_choice.value)

        #
        # Create an array that maps label indexes to their new values
        # All labels to be deleted have a value in this array of zero
        #
        new_object_count = len(indexes)
        max_label = np.max(src_objects.segmented)
        label_indexes = np.zeros((max_label + 1, ), int)
        label_indexes[indexes] = np.arange(1, new_object_count + 1)
        #
        # Loop over both the primary and additional objects
        #
        object_list = (
            [(self.object_name.value, self.target_name.value,
              self.wants_outlines.value, self.outlines_name.value)] +
            [(x.object_name.value, x.target_name.value, x.wants_outlines.value,
              x.outlines_name.value) for x in self.additional_objects])
        m = workspace.measurements
        for src_name, target_name, wants_outlines, outlines_name in object_list:
            src_objects = workspace.get_objects(src_name)
            target_labels = src_objects.segmented.copy()
            #
            # Reindex the labels of the old source image
            #
            # Labels beyond the primary set's maximum can't be mapped;
            # drop them to background before the lookup.
            target_labels[target_labels > max_label] = 0
            target_labels = label_indexes[target_labels]
            #
            # Make a new set of objects - retain the old set's unedited
            # segmentation for the new and generally try to copy stuff
            # from the old to the new.
            #
            target_objects = cpo.Objects()
            target_objects.segmented = target_labels
            target_objects.unedited_segmented = src_objects.unedited_segmented
            if src_objects.has_parent_image:
                target_objects.parent_image = src_objects.parent_image
            workspace.object_set.add_objects(target_objects, target_name)
            #
            # Add measurements for the new objects
            add_object_count_measurements(m, target_name, new_object_count)
            add_object_location_measurements(m, target_name, target_labels)
            #
            # Relate the old numbering to the new numbering
            #
            m.add_measurement(target_name, FF_PARENT % (src_name),
                              np.array(indexes))
            #
            # Count the children (0 / 1)
            #
            child_count = (label_indexes[1:] > 0).astype(int)
            m.add_measurement(src_name, FF_CHILDREN_COUNT % target_name,
                              child_count)
            #
            # Add an outline if asked to do so
            #
            if wants_outlines:
                outline_image = cpi.Image(
                    outline(target_labels) > 0,
                    parent_image=target_objects.parent_image)
                workspace.image_set.add(outlines_name, outline_image)
Example #13
0
 def run(self, workspace):
     """Compute a Gaussian gradient image and add it to the image set.

     Smooths with an automatically- or manually-chosen scale, then
     outputs either the gradient magnitude or a normalized x/y gradient
     direction image under the configured output image name.
     """
     #
     # Get the input and output image names. You need to get the .value
     # because otherwise you'll get the setting object instead of
     # the string name.
     #
     input_image_name = self.input_image_name.value
     output_image_name = self.output_image_name.value
     #
     # Get the image set. The image set has all of the images in it.
     #
     image_set = workspace.image_set
     #
     # Get the input image object. We want a grayscale image here.
     # The image set will convert a color image to a grayscale one
     # and warn the user.
     #
     input_image = image_set.get_image(input_image_name,
                                       must_be_grayscale=True)
     #
     # Get the pixels - these are a 2-d Numpy array.
     #
     pixels = input_image.pixel_data
     #
     # Get the smoothing parameter
     #
     if self.automatic_smoothing:
         # Pick the mode of the power spectrum - obviously this
         # is pretty hokey, not intended to really find a good number.
         #
         fft = np.fft.fft2(pixels)
         power2 = np.sqrt((fft * fft.conjugate()).real)
         mode = np.argwhere(power2 == power2.max())[0]
         scale = np.sqrt(np.sum((mode + .5)**2))
     else:
         scale = self.scale.value
     g = gaussian_gradient_magnitude(pixels, scale)
     if self.gradient_choice == GRADIENT_MAGNITUDE:
         output_pixels = g
     else:
         # Numpy uses i and j instead of x and y. The x axis is 1
         # and the y axis is 0
         x = correlate1d(g, [-1, 0, 1], 1)
         y = correlate1d(g, [-1, 0, 1], 0)
         # NOTE(review): where norm is 0 the divisions below produce
         # nan/inf pixels — presumably acceptable here; confirm.
         norm = np.sqrt(x**2 + y**2)
         if self.gradient_choice == GRADIENT_DIRECTION_X:
             output_pixels = .5 + x / norm / 2
         else:
             output_pixels = .5 + y / norm / 2
     #
     # Make an image object. It's nice if you tell CellProfiler
     # about the parent image - the child inherits the parent's
     # cropping and masking, but it's not absolutely necessary
     #
     output_image = cpi.Image(output_pixels, parent_image=input_image)
     image_set.add(output_image_name, output_image)
     #
     # Save intermediate results for display if the window frame is on
     #
     if self.show_window:
         workspace.display_data.input_pixels = pixels
         workspace.display_data.gradient = g
         workspace.display_data.output_pixels = output_pixels
Beispiel #14
0
    def run(self, workspace):
        """Run the image-arithmetic operation on the current image set.

        Gathers the operand images (and, for measurement operands, scalar
        image measurements), crops all images to a common geometry, applies
        the selected binary or unary operation, applies the post-processing
        exponent/factor/addend, optionally truncates to [0, 1], and adds the
        result to the workspace's image set.
        """
        # Names of the operands that are images; measurement operands
        # contribute no image name.
        image_names = [
            image.image_name.value for image in self.images
            if image.image_or_measurement == IM_IMAGE
        ]
        image_factors = [image.factor.value for image in self.images]
        # Parallel flags: True where the i-th operand is an image (as
        # opposed to a per-image measurement).
        wants_image = [
            image.image_or_measurement == IM_IMAGE for image in self.images
        ]
        if self.operation.value in \
           (O_INVERT, O_LOG_TRANSFORM, O_LOG_TRANSFORM_LEGACY, O_NONE):
            # these only operate on the first image
            image_names = image_names[:1]
            image_factors = image_factors[:1]

        images = [workspace.image_set.get_image(x) for x in image_names]
        pixel_data = [image.pixel_data for image in images]
        masks = [image.mask if image.has_mask else None for image in images]
        #
        # Crop all of the images similarly
        #
        # The image with the fewest pixels defines the common geometry;
        # every other operand (and its mask) is cropped to match it.
        smallest = np.argmin([np.product(pd.shape) for pd in pixel_data])
        smallest_image = images[smallest]
        for i in [x for x in range(len(images)) if x != smallest]:
            pixel_data[i] = smallest_image.crop_image_similarly(pixel_data[i])
            if masks[i] is not None:
                masks[i] = smallest_image.crop_image_similarly(masks[i])
        # weave in the measurements
        # Measurement operands become scalars inserted at their original
        # operand positions so pixel_data/masks stay index-aligned with
        # self.images (and hence with image_factors).
        idx = 0  # NOTE(review): unused local — candidate for removal
        measurements = workspace.measurements
        assert isinstance(measurements, cpmeas.Measurements)
        for i in range(self.operand_count):
            if not wants_image[i]:
                value = measurements.get_current_image_measurement(
                    self.images[i].measurement.value)
                if value is None:
                    value = np.NaN
                else:
                    value = float(value)
                pixel_data.insert(i, value)
                masks.insert(i, True)
        #
        # Multiply images by their factors
        #
        for i, image_factor in enumerate(image_factors):
            if image_factor != 1:
                pixel_data[i] = pixel_data[i] * image_factors[i]

        # The first operand seeds the accumulators.
        output_pixel_data = pixel_data[0]
        output_mask = masks[0]

        opval = self.operation.value
        if opval in (O_ADD, O_SUBTRACT, O_DIFFERENCE, O_MULTIPLY, O_DIVIDE,
                     O_AVERAGE, O_MAXIMUM):
            # Binary operations
            if opval in (O_ADD, O_AVERAGE):
                op = np.add
            elif opval == O_SUBTRACT:
                op = np.subtract
            elif opval == O_DIFFERENCE:
                op = lambda x, y: np.abs(np.subtract(x, y))
            elif opval == O_MULTIPLY:
                # Multiplying all-binary operands degrades to a logical AND.
                if output_pixel_data.dtype == np.bool and \
                   all([pd.dtype == np.bool for pd in pixel_data[1:]]):
                    op = np.logical_and
                else:
                    op = np.multiply
            elif opval == O_MAXIMUM:
                op = np.maximum
            else:
                op = np.divide
            # Fold the remaining operands into the accumulator left-to-right.
            for pd, mask in zip(pixel_data[1:], masks[1:]):
                if not np.isscalar(pd) and output_pixel_data.ndim != pd.ndim:
                    # Reconcile 2-d (grayscale) with 3-d (color) operands by
                    # adding a broadcastable channel axis to the 2-d side.
                    if output_pixel_data.ndim == 2:
                        output_pixel_data = output_pixel_data[:, :, np.newaxis]
                    if pd.ndim == 2:
                        pd = pd[:, :, np.newaxis]
                output_pixel_data = op(output_pixel_data, pd)
                if self.ignore_mask == True:
                    continue
                else:
                    # Combined mask is the intersection of operand masks.
                    if output_mask is None:
                        output_mask = mask
                    elif mask is not None:
                        output_mask = (output_mask & mask)
            if opval == O_AVERAGE:
                # NOTE(review): average divides by the sum of the operand
                # factors, not the operand count — the factors act as
                # weights. Confirm this matches the module's documentation.
                output_pixel_data /= sum(image_factors)
        elif opval == O_INVERT:
            output_pixel_data = 1 - output_pixel_data
        elif opval == O_LOG_TRANSFORM:
            # +1 keeps zero-valued pixels finite under the log.
            output_pixel_data = np.log2(output_pixel_data + 1)
        elif opval == O_LOG_TRANSFORM_LEGACY:
            output_pixel_data = np.log2(output_pixel_data)
        elif opval == O_NONE:
            output_pixel_data = output_pixel_data.copy()
        else:
            raise NotImplementedError(
                "The operation %s has not been implemented" % opval)

        # Check to see if there was a measurement & image w/o mask. If so
        # set mask to none
        if np.isscalar(output_mask):
            output_mask = None
        #
        # Post-processing: exponent, multiply, add
        #
        if self.exponent.value != 1:
            output_pixel_data **= self.exponent.value
        if self.after_factor.value != 1:
            output_pixel_data *= self.after_factor.value
        if self.addend.value != 0:
            output_pixel_data += self.addend.value

        #
        # truncate values
        #
        if self.truncate_low.value:
            output_pixel_data[output_pixel_data < 0] = 0
        if self.truncate_high.value:
            output_pixel_data[output_pixel_data > 1] = 1

        #
        # add the output image to the workspace
        #
        # Inherit crop mask / masking objects from the reference (smallest)
        # image so downstream modules see consistent geometry.
        crop_mask = (smallest_image.crop_mask
                     if smallest_image.has_crop_mask else None)
        masking_objects = (smallest_image.masking_objects
                           if smallest_image.has_masking_objects else None)
        output_image = cpi.Image(output_pixel_data,
                                 mask=output_mask,
                                 crop_mask=crop_mask,
                                 parent_image=images[0],
                                 masking_objects=masking_objects,
                                 convert=False)
        workspace.image_set.add(self.output_image_name.value, output_image)

        #
        # Display results
        #
        if self.show_window:
            workspace.display_data.pixel_data = \
                [image.pixel_data for image in images] + [output_pixel_data]
            workspace.display_data.display_names = \
                image_names + [self.output_image_name.value]
 def test_05_03_must_be_rgb_throws_5_channel(self):
     # A 5-channel image is not RGB, so must_be_rgb must raise ValueError.
     image_set = cpi.ImageSet(0, {}, {})
     np.random.seed(22)
     pixels = np.random.uniform(size=(10, 20, 5))
     image_set.add(IMAGE_NAME, cpi.Image(pixels))
     with self.assertRaises(ValueError):
         image_set.get_image(IMAGE_NAME, must_be_rgb=True)
    def provide_image(self, image_set):
        '''Load an image plane from an OMERO server
        and return a 2-d grayscale image.

        image_set - unused; present to satisfy the image-provider interface.

        Returns a cellprofiler Image whose pixel data is float32 scaled to
        [0, 1] by the nominal range of the stored pixel type. The result is
        cached so repeated calls do not hit the server again.
        '''
        # TODO: return 3d RGB images when c == None like loadimage.py does?

        if self.__is_cached == True:
            # Bug fix: this formerly returned self.__omero_image_plane, an
            # attribute that is never assigned (the raw plane only lives in
            # a local variable below), so every cache hit raised
            # AttributeError. The cached result is self.__cpimage_data.
            return self.__cpimage_data

        gateway = self.__gateway
        pixels_id = self.__pixels_id
        z = self.__z
        c = self.__c
        t = self.__t

        # Retrieve the image data from the omero server
        pixels = self.__pixels
        omero_image_plane = gateway.getPlane(pixels_id, z, c, t)

        # Create a 'cellprofiler' image
        width = pixels.getSizeX().getValue()
        height = pixels.getSizeY().getValue()
        pixels_type = pixels.getPixelsType().getValue().getValue()

        # OMERO stores images in big endian format
        little_endian = False
        if pixels_type == INT_8:
            # Bug fix: np.char is the numpy.char module, not a dtype, and
            # np.frombuffer would reject it; signed 8-bit data is np.int8.
            dtype = np.int8
            scale = 255
        elif pixels_type == UINT_8:
            dtype = np.uint8
            scale = 255
        elif pixels_type == UINT_16:
            dtype = '<u2' if little_endian else '>u2'
            scale = 65535
        elif pixels_type == INT_16:
            dtype = '<i2' if little_endian else '>i2'
            scale = 65535
        elif pixels_type == UINT_32:
            dtype = '<u4' if little_endian else '>u4'
            scale = 2**32
        elif pixels_type == INT_32:
            dtype = '<i4' if little_endian else '>i4'
            scale = 2**32 - 1
        elif pixels_type == FLOAT:
            dtype = '<f4' if little_endian else '>f4'
            scale = 1
        elif pixels_type == DOUBLE:
            dtype = '<f8' if little_endian else '>f8'
            scale = 1
        else:
            raise NotImplementedError(
                "omero pixels type not implemented for %s" % pixels_type)
        # TODO: should something be done here with MaxSampleValue (like loadimages.py does)?

        # Reinterpret the raw bytes, reshape to (rows, cols) and normalize.
        image = np.frombuffer(omero_image_plane, dtype)
        image.shape = (height, width)
        image = image.astype(np.float32) / float(scale)
        image = cpimage.Image(image)
        self.__cpimage_data = image
        self.__is_cached = True
        return image
 def test_05_04_must_be_rgb_alpha(self):
     # An RGBA image satisfies must_be_rgb; the alpha channel is dropped.
     image_set = cpi.ImageSet(0, {}, {})
     rgba = np.zeros((10, 20, 4), float)
     image_set.add(IMAGE_NAME, cpi.Image(rgba))
     rgb = image_set.get_image(IMAGE_NAME, must_be_rgb=True)
     self.assertEqual(tuple(rgb.pixel_data.shape), (10, 20, 3))
Beispiel #18
0
    def run_two_measurements(self, workspace):
        """Classify objects into four bins by thresholding two measurements.

        Each measurement is split at a threshold (custom, mean or median),
        yielding a low/high flag per object per measurement. The 2x2
        combinations are written as per-object and per-image measurements,
        and optionally rendered as a 4-color label image.
        """
        measurements = workspace.measurements
        # One boolean array per measurement: True where value >= threshold.
        in_high_class = []
        # The raw value arrays, kept for NaN masking below.
        saved_values = []
        for feature, threshold_method, threshold in (
            (self.first_measurement, self.first_threshold_method, 
             self.first_threshold),
            (self.second_measurement, self.second_threshold_method,
             self.second_threshold)):
            values = measurements.get_current_measurement(
                self.object_name.value, feature.value)
            saved_values.append(values)
            if threshold_method == TM_CUSTOM:
                t = threshold.value
            elif len(values) == 0:
                # No objects: any threshold works; 0 keeps the math defined.
                t = 0
            elif threshold_method == TM_MEAN:
                # NaN values are excluded from the statistic.
                t = np.mean(values[~np.isnan(values)])
            elif threshold_method == TM_MEDIAN:
                t = np.median(values[~np.isnan(values)])
            else:
                raise ValueError("Unknown threshold method: %s" %
                                 threshold_method.value)
            in_high_class.append(values >= t)
        feature_names = self.get_feature_name_matrix()
        # NOTE: `values` here is the SECOND measurement's array (loop leftover).
        num_values = len(values)
        for i in range(2):
            for j in range(2):
                # Objects whose first measurement is in class i and second
                # in class j (0 = low, 1 = high).
                in_class = ((in_high_class[0].astype(int) == i) &
                            (in_high_class[1].astype(int) == j))
                measurements.add_measurement(self.object_name.value,
                                             "_".join((M_CATEGORY, feature_names[i,j])),
                                             in_class.astype(int))
                num_hits = in_class.sum()
                measurement_name = '_'.join((M_CATEGORY, feature_names[i,j],F_NUM_PER_BIN))
                measurements.add_measurement(cpmeas.IMAGE, measurement_name,
                                             num_hits)
                measurement_name = '_'.join((M_CATEGORY, feature_names[i,j],F_PCT_PER_BIN))
                measurements.add_measurement(cpmeas.IMAGE, measurement_name,
                                         100.0*float(num_hits)/num_values if num_values > 0 else 0)

        objects = workspace.object_set.get_objects(self.object_name.value)
        if self.wants_image:
            # Encode each object's bin as 1..4; 0 is reserved for background
            # and for objects with NaN measurements.
            class_1, class_2 = in_high_class
            object_codes = class_1.astype(int)+class_2.astype(int)*2 + 1
            object_codes = np.hstack(([0], object_codes))
            # NOTE(review): this masks NaNs of the second measurement only
            # (`values` is the loop leftover) — confirm both were intended.
            object_codes[np.hstack((False,np.isnan(values)))] = 0
            nobjects = len(class_1)
            # mapping[k] maps label k to itself, or to 0 to suppress it.
            mapping = np.zeros(nobjects+1, int)
            mapping[1:] = np.arange(1, nobjects+1)
            for i in range(2): 
                # NOTE(review): saved_values[i] has length nobjects while
                # mapping has nobjects+1; this boolean mask therefore zeroes
                # mapping[0..nobjects-1], i.e. the entry for label k-1, not
                # label k. Possibly intended: mapping[1:][np.isnan(...)] = 0
                # — confirm against the display behavior.
                mapping[np.isnan(saved_values[i])] = 0  
            labels = object_codes[mapping[objects.segmented]]
            colors = self.get_colors(4)
            # Drop the alpha channel from the colormap output.
            image = colors[labels,:3]
            image = cpi.Image(image,parent_image = objects.parent_image)
            workspace.image_set.add(self.image_name.value, image)
            
        if self.show_window:
            workspace.display_data.in_high_class=in_high_class
            # NOTE(review): the trailing comma makes this a 1-tuple wrapping
            # the label matrix — confirm the display code expects a tuple.
            workspace.display_data.labels = objects.segmented,
            workspace.display_data.saved_values = saved_values
 def test_01_01_init_image(self):
     # Constructing an Image from a 2-d array must not raise.
     cpi.Image(np.zeros((10, 10)))
    def run(self, workspace):
        """Render a measurement's values as text on an image.

        Draws either a single image measurement at the image center or one
        value per object at each object's center (with a small random
        offset so overlapping labels separate), then rasterizes the
        matplotlib figure into a new image added to the image set.
        """
        import matplotlib
        import matplotlib.cm
        import matplotlib.backends.backend_agg
        import matplotlib.transforms
        from cellprofiler.gui.cpfigure_tools import figure_to_image, only_display_image
        #
        # Get the image
        #
        image = workspace.image_set.get_image(self.image_name.value)
        if self.wants_image:
            pixel_data = image.pixel_data
        else:
            # Black background of the same 2-d extent as the input image.
            pixel_data = np.zeros(image.pixel_data.shape[:2])
        if self.objects_or_image == OI_OBJECTS:
            objects = workspace.object_set.get_objects(self.objects_name.value)
        workspace.display_data.pixel_data = pixel_data
        if self.use_color_map():
            workspace.display_data.labels = objects.segmented
        #
        # Get the measurements and positions
        #
        measurements = workspace.measurements
        if self.objects_or_image == OI_IMAGE:
            # Single value positioned at the image center, jittered by a
            # unit-length random offset.
            value = measurements.get_current_image_measurement(
                self.measurement.value)
            values = [value]
            x = [pixel_data.shape[1] / 2]
            x_offset = np.random.uniform(high=1.0, low=-1.0)
            x[0] += x_offset
            y = [pixel_data.shape[0] / 2]
            # x/y offsets form a unit vector: x_offset^2 + y_offset^2 == 1.
            y_offset = np.sqrt(1 - x_offset**2)
            y[0] += y_offset
        else:
            values = measurements.get_current_measurement(
                self.objects_name.value, self.measurement.value)
            if len(values) < objects.count:
                # Pad missing per-object values with NaN so the arrays align.
                temp = np.zeros(objects.count, values.dtype)
                temp[:len(values)] = values
                temp[len(values):] = np.nan
                values = temp
            x = measurements.get_current_measurement(self.objects_name.value,
                                                     M_LOCATION_CENTER_X)
            # Per-object unit-vector jitter scaled by the offset setting.
            x_offset = np.random.uniform(high=1.0, low=-1.0, size=x.shape)
            y_offset = np.sqrt(1 - x_offset**2)
            x += self.offset.value * x_offset
            y = measurements.get_current_measurement(self.objects_name.value,
                                                     M_LOCATION_CENTER_Y)
            y += self.offset.value * y_offset
            # Drop objects with NaN value or position.
            mask = ~(np.isnan(values) | np.isnan(x) | np.isnan(y))
            values = values[mask]
            x = x[mask]
            y = y[mask]
            workspace.display_data.mask = mask
        workspace.display_data.values = values
        workspace.display_data.x = x
        workspace.display_data.y = y
        fig = matplotlib.figure.Figure()
        axes = fig.add_subplot(1, 1, 1)

        def imshow_fn(pixel_data):
            # Note: requires typecast to avoid failure during
            #       figure_to_image (IMG-764)
            img = pixel_data * 255
            img[img < 0] = 0
            img[img > 255] = 255
            img = img.astype(np.uint8)
            axes.imshow(img, cmap=matplotlib.cm.Greys_r)

        self.display_on_figure(workspace, axes, imshow_fn)

        canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
        if self.saved_image_contents == E_AXES:
            # Size the figure so one figure inch maps to fig.dpi image pixels.
            fig.set_frameon(False)
            if not self.use_color_map():
                fig.subplots_adjust(0.1, .1, .9, .9, 0, 0)
            shape = pixel_data.shape
            width = float(shape[1]) / fig.dpi
            height = float(shape[0]) / fig.dpi
            fig.set_figheight(height)
            fig.set_figwidth(width)
        elif self.saved_image_contents == E_IMAGE:
            if self.use_color_map():
                # Hide the colorbar axes in image-only output.
                fig.axes[1].set_visible(False)
            only_display_image(fig, pixel_data.shape)
        else:
            if not self.use_color_map():
                fig.subplots_adjust(.1, .1, .9, .9, 0, 0)

        # Rasterize the annotated figure and publish it as a new image.
        pixel_data = figure_to_image(fig, dpi=fig.dpi)
        image = cpi.Image(pixel_data)
        workspace.image_set.add(self.display_image.value, image)
 def test_02_01_set_image(self):
     # The legacy .Image setter accepts a 2-d array without raising.
     img = cpi.Image()
     img.Image = np.ones((10, 10))
Beispiel #22
0
    def run(self, workspace):
        '''Run the module on the image set.

        Measures, per seed object, the number of skeleton trunks, non-trunk
        branchpoints and branch endpoints of a binary skeleton image, and
        optionally emits a neuron graph and a color branchpoint image.
        '''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(skeleton_name,
                                                       must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except:
            # Best-effort fallback when the labels cannot be cropped to the
            # skeleton's geometry: pad/trim and zero the uncovered area.
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels, footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:

            def size_fn(area, is_object):
                # Fill background holes no larger than the configured size.
                return (~is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(combined_skel,
                                                     ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        # Bug fix: the fourth term was `combined_skel[1, 1]` — a single
        # scalar pixel broadcast over the whole array — instead of the
        # `[1:, 1:]` shifted view needed to test all four corners of each
        # 2x2 neighborhood.
        odd_case = (combined_skel[:-1, :-1] & combined_skel[1:, :-1]
                    & combined_skel[:-1, 1:] & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # eminating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        # 0-2 neighbors -> 0 extra branches, 3 -> 1, 4 -> 2.
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(
                scind.sum(branching_counts, nearby_labels,
                          label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0, ), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(
                scind.sum(branch_points, outside_labels, label_range))
        else:
            branch_counts = np.zeros((0, ), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels,
                                       label_range))
        else:
            end_counts = np.zeros((0, ), int)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join(
            (C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES, skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        #
        # Collect the graph information
        #
        if self.wants_neuron_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_neuron_graph(
                combined_skel, dlabels, trunk_mask,
                branch_points & ~trunk_mask, end_points,
                intensity_image.pixel_data)

            # (removed an unused local that shadowed m.image_number)
            edge_path, vertex_path = self.get_graph_file_paths(
                m, m.image_number)
            workspace.interaction_request(self,
                                          m.image_number,
                                          edge_path,
                                          edge_graph,
                                          vertex_path,
                                          vertex_graph,
                                          headless_ok=True)

            if self.show_window:
                workspace.display_data.edge_graph = edge_graph
                workspace.display_data.vertex_graph = vertex_graph
                workspace.display_data.intensity_image = intensity_image.pixel_data
        #
        # Make the display image
        #
        if self.show_window or self.wants_branchpoint_image:
            branchpoint_image = np.zeros(
                (skeleton.shape[0], skeleton.shape[1], 3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            # White skeleton; red trunks, green branches, blue endpoints;
            # seed areas dimmed and lifted slightly for contrast.
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= .875
            branchpoint_image[dilated_labels != 0, :] += .1
            if self.show_window:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image, parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
 def test_04_01_image_mask_missize(self):
     # A mask whose shape differs from the image's must be rejected.
     img = cpi.Image()
     img.image = np.ones((10, 10))
     with self.assertRaises(AssertionError):
         img.set_mask(np.ones((5, 5)))
 def test_02_02_must_be_binary_throws(self):
     # A float image cannot be fetched with must_be_binary.
     image_set = cpi.ImageSet(0, {}, {})
     image_set.add(IMAGE_NAME, cpi.Image(np.zeros((10, 20), float)))
     with self.assertRaises(ValueError):
         image_set.get_image(IMAGE_NAME, must_be_binary=True)
 def test_01_01_add(self):
     # Adding an image registers exactly one provider under its name.
     image_set = cpi.ImageSet(0, {}, {})
     image_set.add(IMAGE_NAME, cpi.Image(np.zeros((10, 20))))
     providers = image_set.providers
     self.assertEqual(len(providers), 1)
     self.assertEqual(providers[0].name, IMAGE_NAME)
 def test_03_03_must_be_gray_color(self):
     # A color image is converted to 2-d when must_be_grayscale is set.
     image_set = cpi.ImageSet(0, {}, {})
     image_set.add(IMAGE_NAME, cpi.Image(np.zeros((10, 20, 3), float)))
     gray = image_set.get_image(IMAGE_NAME, must_be_grayscale=True)
     self.assertEqual(tuple(gray.pixel_data.shape), (10, 20))
 def test_02_01_must_be_binary(self):
     # A boolean image passes the must_be_binary check unchanged.
     image_set = cpi.ImageSet(0, {}, {})
     image_set.add(IMAGE_NAME, cpi.Image(np.zeros((10, 20), bool)))
     binary = image_set.get_image(IMAGE_NAME, must_be_binary=True)
     self.assertEqual(tuple(binary.pixel_data.shape), (10, 20))
 def test_04_02_must_be_color_throws(self):
     # A 2-d grayscale image cannot be fetched with must_be_color.
     image_set = cpi.ImageSet(0, {}, {})
     np.random.seed(22)
     pixels = np.random.uniform(size=(10, 20))
     image_set.add(IMAGE_NAME, cpi.Image(pixels))
     with self.assertRaises(ValueError):
         image_set.get_image(IMAGE_NAME, must_be_color=True)
 def run(self, workspace):
     """Render the module's objects as an image in the selected mode.

     Accumulates each label plane of the (possibly overlapping) objects
     into a pixel buffer plus an alpha (coverage count) buffer, then
     normalizes overlapping contributions and adds the result to the
     image set.
     """
     objects = workspace.object_set.get_objects(self.object_name.value)
     # alpha counts how many label planes touched each pixel, so
     # overlapping contributions can be averaged at the end.
     alpha = np.zeros(objects.shape)
     if self.image_mode == IM_BINARY:
         pixel_data = np.zeros(objects.shape, bool)
     elif self.image_mode == IM_GRAYSCALE:
         pixel_data = np.zeros(objects.shape)
     elif self.image_mode == IM_UINT16:
         pixel_data = np.zeros(objects.shape, np.int32)
     else:
         # Color mode: RGB buffer.
         pixel_data = np.zeros((objects.shape[0], objects.shape[1], 3))
     convert = True
     # Overlapping objects are stored as multiple label planes.
     for labels, indices in objects.get_labels():
         mask = labels != 0
         if np.all(~ mask):
             continue
         if self.image_mode == IM_BINARY:
             pixel_data[mask] = True
             alpha[mask] = 1
         elif self.image_mode == IM_GRAYSCALE:
             # NOTE(review): normalizes by the max label of THIS plane,
             # not the global max — confirm that is the intended scaling.
             pixel_data[mask] = labels[mask].astype(float) / np.max(labels)
             alpha[mask] = 1
         elif self.image_mode == IM_COLOR:
             import matplotlib.cm
             from cellprofiler.gui.cpfigure_tools import renumber_labels_for_display
             # Map the user's colormap choice to a matplotlib colormap name.
             if self.colormap.value == DEFAULT_COLORMAP:
                 cm_name = cpprefs.get_default_colormap()
             elif self.colormap.value == COLORCUBE:
                 # Colorcube missing from matplotlib
                 cm_name = "gist_rainbow"
             elif self.colormap.value == LINES:
                 # Lines missing from matplotlib and not much like it,
                 # Pretty boring palette anyway, hence
                 cm_name = "Pastel1"
             elif self.colormap.value == WHITE:
                 # White missing from matplotlib, it's just a colormap
                 # of all completely white... not even different kinds of
                 # white. And, isn't white just a uniform sampling of
                 # frequencies from the spectrum?
                 cm_name = "Spectral"
             else:
                 cm_name = self.colormap.value
             cm = matplotlib.cm.get_cmap(cm_name)
             mapper = matplotlib.cm.ScalarMappable(cmap=cm)
             # Accumulate (+=) so overlapping planes blend; alpha divides
             # the sum back down below.
             pixel_data[mask, :] += \
                 mapper.to_rgba(renumber_labels_for_display(labels))[mask, :3]
             alpha[mask] += 1
         elif self.image_mode == IM_UINT16:
             pixel_data[mask] = labels[mask]
             alpha[mask] = 1
             # Keep raw label numbers; don't rescale to [0, 1].
             convert = False
     mask = alpha > 0
     # Average the accumulated contributions where planes overlapped.
     if self.image_mode == IM_BINARY:
         pass
     elif self.image_mode == IM_COLOR:
         pixel_data[mask, :] = pixel_data[mask, :] / alpha[mask][:, np.newaxis]
     else:
         pixel_data[mask] = pixel_data[mask] / alpha[mask]
     image = cpi.Image(pixel_data, parent_image = objects.parent_image,
                       convert = convert)
     workspace.image_set.add(self.image_name.value, image)
     if self.show_window:
         workspace.display_data.ijv = objects.ijv
         workspace.display_data.pixel_data = pixel_data
 def run_module(self,
                image,
                labels,
                center_labels=None,
                center_choice=M.C_CENTERS_OF_OTHER,
                bin_count=4,
                maximum_radius=100,
                wants_scaled=True,
                wants_workspace=False):
     '''Run the module, returning the measurements
     
     image - matrix representing the image to be analyzed
     labels - labels matrix of objects to be analyzed
     center_labels - labels matrix of alternate centers or None for self
                     centers
     bin_count - # of radial bins
     wants_workspace - if True, also return the workspace used for the run
     '''
     # Configure the module's image / object settings.
     module = M.MeasureObjectRadialDistribution()
     module.images[0].image_name.value = IMAGE_NAME
     obj_group = module.objects[0]
     obj_group.object_name.value = OBJECT_NAME
     # Register the primary objects (and optional centers) in the object set.
     object_set = cpo.ObjectSet()
     primary = cpo.Objects()
     primary.segmented = labels
     object_set.add_objects(primary, OBJECT_NAME)
     if center_labels is None:
         obj_group.center_choice.value = M.C_SELF
     else:
         obj_group.center_choice.value = center_choice
         obj_group.center_object_name.value = CENTER_NAME
         centers = cpo.Objects()
         centers.segmented = center_labels
         object_set.add_objects(centers, CENTER_NAME)
     bins = module.bin_counts[0]
     bins.bin_count.value = bin_count
     bins.wants_scaled.value = wants_scaled
     bins.maximum_radius.value = maximum_radius
     # One heat map per measurement flavor, each saving its display image.
     for _ in range(3):
         module.add_heatmap()
     flavors = ((M.A_FRAC_AT_D, M.F_FRAC_AT_D),
                (M.A_MEAN_FRAC, M.F_MEAN_FRAC),
                (M.A_RADIAL_CV, M.F_RADIAL_CV))
     for heatmap, (measurement, suffix) in zip(module.heatmaps, flavors):
         heatmap.image_name.value = IMAGE_NAME
         heatmap.object_name.value = OBJECT_NAME
         heatmap.bin_count.value = str(bin_count)
         heatmap.wants_to_save_display.value = True
         heatmap.display_name.value = HEAT_MAP_NAME + suffix
         heatmap.colormap.value = "gray"
         heatmap.measurement.value = measurement
     # Assemble the minimal workspace and run.
     pipeline = cpp.Pipeline()
     measurements = cpmeas.Measurements()
     image_set_list = cpi.ImageSetList()
     # Measurements double as the image set in this harness.
     image_set = measurements
     image_set.add(IMAGE_NAME, cpi.Image(image))
     workspace = cpw.Workspace(pipeline, module, image_set, object_set,
                               measurements, image_set_list)
     module.run(workspace)
     if wants_workspace:
         return measurements, workspace
     return measurements