Example #1
    def get_colors(self, count):
        """Get colors used for two-measurement labels image"""
        import matplotlib.cm as cm

        cmap = cm.get_cmap(cpprefs.get_default_colormap())
        #
        # Trick the colormap into divulging the values used.
        #
        sm = cm.ScalarMappable(cmap=cmap)
        colors = sm.to_rgba(np.arange(count) + 1)
        return np.vstack((np.zeros(colors.shape[1]), colors))
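The trick here is general: a ScalarMappable that is not attached to any image will autoscale and color-map whatever array is passed to to_rgba. A minimal, CellProfiler-free sketch of the same idea (the function and colormap names are illustrative, not part of the project):

    import numpy as np
    import matplotlib.cm as cm

    def label_colors(count, cmap_name="viridis"):
        # Map label numbers 1..count through the colormap; to_rgba autoscales.
        sm = cm.ScalarMappable(cmap=cmap_name)     # a registered name is accepted here
        colors = sm.to_rgba(np.arange(count) + 1)  # shape (count, 4)
        # Prepend an all-zero row so label 0 (the background) maps to transparent black.
        return np.vstack((np.zeros(colors.shape[1]), colors))

    print(label_colors(3).shape)   # (4, 4)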
Example #2
    def display_on_figure(self, workspace, axes, imshow_fn):
        if self.use_color_map():
            labels = workspace.display_data.labels
            if self.wants_image:
                pixel_data = workspace.display_data.pixel_data
            else:
                pixel_data = (labels != 0).astype(numpy.float32)
            if pixel_data.ndim == 3:
                pixel_data = numpy.sum(pixel_data, 2) / pixel_data.shape[2]
            colormap_name = self.colormap.value
            if colormap_name == "Default":
                colormap_name = get_default_colormap()
            colormap = matplotlib.cm.get_cmap(colormap_name)
            values = workspace.display_data.values
            vmask = workspace.display_data.mask
            colors = numpy.ones((len(vmask) + 1, 4))
            colors[1:][~vmask, :3] = 1
            sm = matplotlib.cm.ScalarMappable(cmap=colormap)
            if self.color_map_scale_choice == CMS_MANUAL:
                sm.set_clim(self.color_map_scale.min, self.color_map_scale.max)
            sm.set_array(values)
            colors[1:][vmask, :] = sm.to_rgba(values)
            img = colors[labels, :3] * pixel_data[:, :, numpy.newaxis]
            imshow_fn(img)
            assert isinstance(axes, matplotlib.axes.Axes)
            figure = axes.get_figure()
            assert isinstance(figure, matplotlib.figure.Figure)
            figure.colorbar(sm, ax=axes)
        else:
            imshow_fn(workspace.display_data.pixel_data)
            for x, y, value in zip(
                    workspace.display_data.x,
                    workspace.display_data.y,
                    workspace.display_data.values,
            ):
                try:
                    fvalue = float(value)
                    svalue = "%.*f" % (self.decimals.value, fvalue)
                except (TypeError, ValueError):
                    svalue = str(value)

                text = matplotlib.text.Text(
                    x=x,
                    y=y,
                    text=svalue,
                    size=self.font_size.value,
                    color=self.text_color.value,
                    verticalalignment="center",
                    horizontalalignment="center",
                )
                axes.add_artist(text)
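Worth noting in this example: figure.colorbar is driven by a ScalarMappable that is not attached to any image, which works as long as the mappable has been given data (set_array) or explicit limits (set_clim). A small self-contained sketch of that pattern, with made-up data and an arbitrary colormap:

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.cm

    values = np.linspace(0.0, 5.0, 20)

    fig, ax = plt.subplots()
    sm = matplotlib.cm.ScalarMappable(cmap=plt.get_cmap("jet"))
    sm.set_clim(0.0, 5.0)                  # manual scale, as with CMS_MANUAL above
    sm.set_array(values)
    ax.imshow(sm.to_rgba(values.reshape(4, 5)))
    fig.colorbar(sm, ax=ax)                # colorbar comes from the mappable, not the image
    plt.show()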
Example #3
 def __init__(self, vw, color, can_delete):
     super(WorkspaceViewObjectsRow, self).__init__(vw, color, can_delete)
     self.update_chooser(first=True)
     name = self.chooser.GetStringSelection()
     self.data = bind_data_class(ObjectsData, self.color_ctrl, vw.redraw)(
         name,
         None,
         outline_color=self.color,
         colormap=get_default_colormap(),
         alpha=0.5,
         mode=MODE_HIDE,
     )
     vw.image.add(self.data)
     self.last_mode = MODE_LINES
Example #4
    def upgrade_settings(self, setting_values, variable_revision_number,
                         module_name):
        if variable_revision_number == 1:
            (
                objects_or_image,
                objects_name,
                measurement,
                image_name,
                text_color,
                display_image,
                dpi,
                saved_image_contents,
            ) = setting_values
            setting_values = [
                objects_or_image,
                objects_name,
                measurement,
                image_name,
                text_color,
                display_image,
                10,
                2,
                saved_image_contents,
            ]
            variable_revision_number = 2

        if variable_revision_number == 2:
            """Added annotation offset"""
            setting_values = setting_values + ["0"]
            variable_revision_number = 3

        if variable_revision_number == 3:
            # Added color map mode
            setting_values = setting_values + [
                CT_TEXT,
                get_default_colormap(),
            ]
            variable_revision_number = 4

        if variable_revision_number == 4:
            # added wants_image
            setting_values = setting_values + ["Yes"]
            variable_revision_number = 5
        if variable_revision_number == 5:
            # added color_map_scale_choice and color_map_scale
            setting_values = setting_values + [
                CMS_USE_MEASUREMENT_RANGE, "0.0,1.0"
            ]
            variable_revision_number = 6
        return setting_values, variable_revision_number
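The upgrade pattern is cumulative: each if block appends defaults for the settings its revision introduced and bumps the revision number, so a pipeline saved at any older revision falls through every later branch. A toy illustration of the mechanism (the setting values here are hypothetical, not CellProfiler's):

    def upgrade(values, revision):
        if revision == 1:
            values = values + ["0"]           # hypothetical offset added in rev 2
            revision = 2
        if revision == 2:
            values = values + ["Default"]     # hypothetical colormap added in rev 3
            revision = 3
        return values, revision

    print(upgrade(["Objects", "Nuclei"], 1))  # (['Objects', 'Nuclei', '0', 'Default'], 3)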
Example #5
 def display(self, workspace, figure):
     import matplotlib
     nsubplots = 0
     if self.wants_population_density():
         nsubplots += 1
     if self.wants_distance_to_edge():
         nsubplots += 1
     figure = workspace.create_or_find_figure(subplots=(nsubplots, 1))
     cmap = cpprefs.get_default_colormap()
     cm = matplotlib.cm.get_cmap(cmap)
     cm.set_bad(color='black')
     axes = None
     if self.wants_population_density():
         image = np.ma.MaskedArray(workspace.display_data.count_display,
                                   workspace.display_data.count_display < 0)
         title = "# objects within %d px" % self.radius.value
         axes = figure.subplot_imshow(0,
                                      0,
                                      image,
                                      title=title,
                                      colormap=cm,
                                      colorbar=True,
                                      normalize=False,
                                      vmin=0,
                                      vmax=np.max(image))
     if self.wants_distance_to_edge():
         sm = matplotlib.cm.ScalarMappable(cmap=cm)
         image = np.ma.MaskedArray(workspace.display_data.distances,
                                   workspace.display_data.distances < 0)
         image = sm.to_rgba(image)
         #
         # We give the edge a little blur so that single pixels show up ok
         #
         edge = gaussian_filter(workspace.display_data.edge.astype(float),
                                3)
         edge = edge / np.max(edge)
         edge_color = sm.to_rgba(
             np.array([np.max(workspace.display_data.distances)]))[0]
         image = image * (1 - edge[:, :, np.newaxis]) + \
             edge[:, :, np.newaxis] * edge_color[np.newaxis, np.newaxis, :]
         figure.subplot_imshow(nsubplots - 1,
                               0,
                               image,
                               title="Distance to edge")
Example #6
    def display(self, workspace, figure):
        """Display a visualization of the results"""
        from matplotlib.axes import Axes
        from matplotlib.lines import Line2D
        import matplotlib.cm

        if self.wants_objskeleton_graph:
            figure.set_subplots((2, 1))
        else:
            figure.set_subplots((1, 1))
        title = (
            "Branchpoints of %s and %s\nTrunks are red\nBranches are green\nEndpoints are blue"
            % (self.seed_objects_name.value, self.image_name.value))
        figure.subplot_imshow(0, 0, workspace.display_data.branchpoint_image,
                              title)
        if self.wants_objskeleton_graph:
            image = workspace.display_data.intensity_image
            figure.subplot_imshow_grayscale(1,
                                            0,
                                            image,
                                            title="ObjectSkeleton graph",
                                            sharexy=figure.subplot(0, 0))
            axes = figure.subplot(1, 0)
            assert isinstance(axes, Axes)
            edge_graph = workspace.display_data.edge_graph
            vertex_graph = workspace.display_data.vertex_graph
            i = vertex_graph["i"]
            j = vertex_graph["j"]
            kind = vertex_graph["kind"]
            brightness = edge_graph["total_intensity"] / edge_graph["length"]
            brightness = (brightness - np.min(brightness)) / (
                np.max(brightness) - np.min(brightness) + 0.000001)
            cm = matplotlib.cm.get_cmap(cpprefs.get_default_colormap())
            cmap = matplotlib.cm.ScalarMappable(cmap=cm)
            edge_color = cmap.to_rgba(brightness)
            for idx in range(len(edge_graph["v1"])):
                v = np.array(
                    [edge_graph["v1"][idx] - 1, edge_graph["v2"][idx] - 1])
                line = Line2D(j[v], i[v], color=edge_color[idx])
                axes.add_line(line)
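The graph drawing boils down to: normalize a per-edge scalar to [0, 1], push it through a ScalarMappable, and add one Line2D per edge. A stand-alone sketch of just that step, with random points and weights and an arbitrary colormap:

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.cm
    from matplotlib.lines import Line2D

    rng = np.random.default_rng(0)
    points = rng.random((6, 2))
    edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]
    weights = rng.random(len(edges))

    # Same normalization as the example's "brightness", including the small epsilon.
    weights = (weights - weights.min()) / (weights.max() - weights.min() + 1e-6)
    colors = matplotlib.cm.ScalarMappable(cmap=plt.get_cmap("jet")).to_rgba(weights)

    fig, ax = plt.subplots()
    for (a, b), color in zip(edges, colors):
        ax.add_line(Line2D(points[[a, b], 0], points[[a, b], 1], color=color))
    ax.autoscale_view()
    plt.show()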
Example #7
    def __init__(self, workspace_view, color, can_delete):
        super(WorkspaceViewImageRow, self).__init__(workspace_view, color, can_delete)
        image_set = workspace_view.workspace.image_set
        name = self.chooser.GetStringSelection()

        im = get_intensity_mode()
        if im == INTENSITY_MODE_LOG:
            normalization = NORMALIZE_LOG
        elif im == INTENSITY_MODE_NORMAL:
            normalization = NORMALIZE_LINEAR
        else:
            normalization = NORMALIZE_RAW
        alpha = 1.0 / (len(workspace_view.image_rows) + 1.0)
        self.data = bind_data_class(ImageData, self.color_ctrl, workspace_view.redraw)(
            name,
            None,
            mode=MODE_HIDE,
            color=self.color,
            colormap=get_default_colormap(),
            alpha=alpha,
            normalization=normalization,
        )
        workspace_view.image.add(self.data)
        self.last_mode = MODE_COLORIZE
Example #8
    def display(self, workspace, figure):
        """Show an informative display"""
        import matplotlib
        import cellprofiler.gui.figure

        figure.set_subplots((2, 1))
        assert isinstance(figure, cellprofiler.gui.figure.Figure)

        i = workspace.display_data.i
        j = workspace.display_data.j
        angles = workspace.display_data.angle
        mask = workspace.display_data.mask
        labels = workspace.display_data.labels
        count = workspace.display_data.count

        color_image = numpy.zeros((mask.shape[0], mask.shape[1], 4))
        #
        # We do the coloring using alpha values to let the different
        # things we draw meld together.
        #
        # The binary mask is white.
        #
        color_image[mask, :] = MASK_ALPHA
        if count > 0:
            mappable = matplotlib.cm.ScalarMappable(
                cmap=matplotlib.cm.get_cmap(get_default_colormap())
            )
            numpy.random.seed(0)
            colors = mappable.to_rgba(numpy.random.permutation(numpy.arange(count)))

            #
            # The labels
            #
            color_image[labels > 0, :] += (
                colors[labels[labels > 0] - 1, :] * LABEL_ALPHA
            )
            #
            # Do each diamond individually (because the angles are almost certainly
            # different for each)
            #
            lcolors = colors * 0.5 + 0.5  # Wash the colors out a little
            for ii in range(count):
                diamond = self.get_diamond(angles[ii])
                hshape = ((numpy.array(diamond.shape) - 1) / 2).astype(int)
                iii = i[ii]
                jjj = j[ii]
                color_image[
                    iii - hshape[0] : iii + hshape[0] + 1,
                    jjj - hshape[1] : jjj + hshape[1] + 1,
                    :,
                ][diamond, :] += (lcolors[ii, :] * WORM_ALPHA)
        #
        # Do our own alpha-normalization
        #
        color_image[:, :, -1][color_image[:, :, -1] == 0] = 1
        color_image[:, :, :-1] = (
            color_image[:, :, :-1] / color_image[:, :, -1][:, :, numpy.newaxis]
        )
        plot00 = figure.subplot_imshow_bw(0, 0, mask, self.image_name.value)
        figure.subplot_imshow_color(
            1,
            0,
            color_image[:, :, :-1],
            title=self.object_name.value,
            normalize=False,
            sharexy=plot00,
        )
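The "do our own alpha-normalization" step is premultiplied-alpha compositing followed by a divide. A tiny numpy-only sketch of the same arithmetic on a toy canvas:

    import numpy as np

    # Accumulate weighted RGBA layers, then divide RGB by the accumulated alpha
    # wherever anything was drawn (background alpha forced to 1 to avoid 0/0).
    canvas = np.zeros((4, 4, 4))
    red = np.array([1.0, 0.0, 0.0, 1.0])
    green = np.array([0.0, 1.0, 0.0, 1.0])

    canvas[:2, :, :] += red * 0.5        # first layer at weight 0.5
    canvas[:, :2, :] += green * 0.25     # second layer at weight 0.25

    canvas[:, :, -1][canvas[:, :, -1] == 0] = 1
    canvas[:, :, :-1] /= canvas[:, :, -1][:, :, np.newaxis]
    print(canvas[0, 0, :3])              # overlap pixel: red and green blended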
Example #9
def get_colormap(name):
    """Get colormap, accounting for possible request for default"""
    if name == "Default":
        name = cpprefs.get_default_colormap()
    return matplotlib.cm.get_cmap(name)
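For reference, a stand-alone version of the same helper with the CellProfiler preference lookup replaced by a hard-coded fallback (the fallback name is made up):

    import matplotlib.pyplot as plt

    FALLBACK_COLORMAP = "jet"        # stands in for cpprefs.get_default_colormap()

    def resolve_colormap(name):
        """Get a colormap, resolving the "Default" sentinel to the fallback."""
        if name == "Default":
            name = FALLBACK_COLORMAP
        return plt.get_cmap(name)

    print(resolve_colormap("Default").name)   # -> jet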
Example #10
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)
        dimensions = len(objects.shape)
        assert isinstance(objects, cpo.Objects)
        has_pixels = objects.areas > 0
        labels = objects.small_removed_segmented
        kept_labels = objects.segmented
        neighbor_objects = workspace.object_set.get_objects(
            self.neighbors_name.value)
        neighbor_labels = neighbor_objects.small_removed_segmented
        neighbor_kept_labels = neighbor_objects.segmented
        assert isinstance(neighbor_objects, cpo.Objects)
        if not self.wants_excluded_objects.value:
            # Remove labels not present in kept segmentation while preserving object IDs.
            mask = neighbor_kept_labels > 0
            neighbor_labels[~mask] = 0
        nobjects = np.max(labels)
        nkept_objects = len(objects.indices)
        nneighbors = np.max(neighbor_labels)

        _, object_numbers = objects.relate_labels(labels, kept_labels)
        if self.neighbors_are_objects:
            neighbor_numbers = object_numbers
            neighbor_has_pixels = has_pixels
        else:
            _, neighbor_numbers = neighbor_objects.relate_labels(
                neighbor_labels, neighbor_objects.small_removed_segmented)
            neighbor_has_pixels = np.bincount(neighbor_labels.ravel())[1:] > 0
        neighbor_count = np.zeros((nobjects, ))
        pixel_count = np.zeros((nobjects, ))
        first_object_number = np.zeros((nobjects, ), int)
        second_object_number = np.zeros((nobjects, ), int)
        first_x_vector = np.zeros((nobjects, ))
        second_x_vector = np.zeros((nobjects, ))
        first_y_vector = np.zeros((nobjects, ))
        second_y_vector = np.zeros((nobjects, ))
        angle = np.zeros((nobjects, ))
        percent_touching = np.zeros((nobjects, ))
        expanded_labels = None
        if self.distance_method == D_EXPAND:
            # Find the i,j coordinates of the nearest foreground point
            # to every background point
            if dimensions == 2:
                i, j = scind.distance_transform_edt(labels == 0,
                                                    return_distances=False,
                                                    return_indices=True)
                # Assign each background pixel to the label of its nearest
                # foreground pixel; foreground pixels keep their own label.
                labels = labels[i, j]
            else:
                k, i, j = scind.distance_transform_edt(labels == 0,
                                                       return_distances=False,
                                                       return_indices=True)
                labels = labels[k, i, j]
            expanded_labels = labels  # for display
            distance = 1  # dilate once to make touching edges overlap
            scale = S_EXPANDED
            if self.neighbors_are_objects:
                neighbor_labels = labels.copy()
        elif self.distance_method == D_WITHIN:
            distance = self.distance.value
            scale = str(distance)
        elif self.distance_method == D_ADJACENT:
            distance = 1
            scale = S_ADJACENT
        else:
            raise ValueError("Unknown distance method: %s" %
                             self.distance_method.value)
        if nneighbors > (1 if self.neighbors_are_objects else 0):
            first_objects = []
            second_objects = []
            object_indexes = np.arange(nobjects, dtype=np.int32) + 1
            #
            # First, compute the first and second nearest neighbors,
            # and the angles between self and the first and second
            # nearest neighbors
            #
            ocenters = centers_of_labels(
                objects.small_removed_segmented).transpose()
            ncenters = centers_of_labels(
                neighbor_objects.small_removed_segmented).transpose()
            areas = fix(
                scind.sum(np.ones(labels.shape), labels, object_indexes))
            perimeter_outlines = outline(labels)
            perimeters = fix(
                scind.sum(np.ones(labels.shape), perimeter_outlines,
                          object_indexes))

            i, j = np.mgrid[0:nobjects, 0:nneighbors]
            distance_matrix = np.sqrt((ocenters[i, 0] - ncenters[j, 0])**2 +
                                      (ocenters[i, 1] - ncenters[j, 1])**2)
            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            if distance_matrix.shape[1] == 1:
                # a little buggy, lexsort assumes that a 2-d array of
                # second dimension = 1 is a 1-d array
                order = np.zeros(distance_matrix.shape, int)
            else:
                order = np.lexsort([distance_matrix])
            first_neighbor = 1 if self.neighbors_are_objects else 0
            first_object_index = order[:, first_neighbor]
            first_x_vector = ncenters[first_object_index, 1] - ocenters[:, 1]
            first_y_vector = ncenters[first_object_index, 0] - ocenters[:, 0]
            if nneighbors > first_neighbor + 1:
                second_object_index = order[:, first_neighbor + 1]
                second_x_vector = ncenters[second_object_index,
                                           1] - ocenters[:, 1]
                second_y_vector = ncenters[second_object_index,
                                           0] - ocenters[:, 0]
                v1 = np.array((first_x_vector, first_y_vector))
                v2 = np.array((second_x_vector, second_y_vector))
                #
                # Project the unit vector v1 against the unit vector v2
                #
                dot = np.sum(v1 * v2, 0) / np.sqrt(
                    np.sum(v1**2, 0) * np.sum(v2**2, 0))
                angle = np.arccos(dot) * 180.0 / np.pi

            # Make the structuring element for dilation
            if dimensions == 2:
                strel = strel_disk(distance)
            else:
                strel = skimage.morphology.ball(distance)
            #
            # A slightly larger structuring element, so the dilation reaches into
            # the border, mimicking the structure used to create the outline
            #
            if dimensions == 2:
                strel_touching = strel_disk(distance + 0.5)
            else:
                strel_touching = skimage.morphology.ball(distance + 0.5)
            #
            # Get the extents for each object and calculate the patch
            # that excises the part of the image that is "distance"
            # away
            if dimensions == 2:
                i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]

                minimums_i, maximums_i, _, _ = scind.extrema(
                    i, labels, object_indexes)
                minimums_j, maximums_j, _, _ = scind.extrema(
                    j, labels, object_indexes)

                minimums_i = np.maximum(fix(minimums_i) - distance,
                                        0).astype(int)
                maximums_i = np.minimum(
                    fix(maximums_i) + distance + 1,
                    labels.shape[0]).astype(int)
                minimums_j = np.maximum(fix(minimums_j) - distance,
                                        0).astype(int)
                maximums_j = np.minimum(
                    fix(maximums_j) + distance + 1,
                    labels.shape[1]).astype(int)
            else:
                k, i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1],
                                   0:labels.shape[2]]

                minimums_k, maximums_k, _, _ = scind.extrema(
                    k, labels, object_indexes)
                minimums_i, maximums_i, _, _ = scind.extrema(
                    i, labels, object_indexes)
                minimums_j, maximums_j, _, _ = scind.extrema(
                    j, labels, object_indexes)

                minimums_k = np.maximum(fix(minimums_k) - distance,
                                        0).astype(int)
                maximums_k = np.minimum(
                    fix(maximums_k) + distance + 1,
                    labels.shape[0]).astype(int)
                minimums_i = np.maximum(fix(minimums_i) - distance,
                                        0).astype(int)
                maximums_i = np.minimum(
                    fix(maximums_i) + distance + 1,
                    labels.shape[1]).astype(int)
                minimums_j = np.maximum(fix(minimums_j) - distance,
                                        0).astype(int)
                maximums_j = np.minimum(
                    fix(maximums_j) + distance + 1,
                    labels.shape[2]).astype(int)
            #
            # Loop over all objects
            # Calculate which ones overlap "index"
            # Calculate how much overlap there is of others to "index"
            #
            for object_number in object_numbers:
                if object_number == 0:
                    #
                    # No corresponding object in small-removed. This means
                    # that the object has no pixels and so was not renumbered.
                    #
                    continue
                index = object_number - 1
                if dimensions == 2:
                    patch = labels[minimums_i[index]:maximums_i[index],
                                   minimums_j[index]:maximums_j[index], ]
                    npatch = neighbor_labels[
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ]
                else:
                    patch = labels[minimums_k[index]:maximums_k[index],
                                   minimums_i[index]:maximums_i[index],
                                   minimums_j[index]:maximums_j[index], ]
                    npatch = neighbor_labels[
                        minimums_k[index]:maximums_k[index],
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ]

                #
                # Find the neighbors
                #
                patch_mask = patch == (index + 1)
                extended = scind.binary_dilation(patch_mask, strel)
                neighbors = np.unique(npatch[extended])
                neighbors = neighbors[neighbors != 0]
                if self.neighbors_are_objects:
                    neighbors = neighbors[neighbors != object_number]
                nc = len(neighbors)
                neighbor_count[index] = nc
                if nc > 0:
                    first_objects.append(np.ones(nc, int) * object_number)
                    second_objects.append(neighbors)
                #
                # Find the # of overlapping pixels. Dilate the neighbors
                # and see how many pixels overlap our image. Use a 3x3
                # structuring element to expand the overlapping edge
                # into the perimeter.
                #
                if dimensions == 2:
                    outline_patch = (perimeter_outlines[
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ] == object_number
                                     )
                else:
                    outline_patch = (perimeter_outlines[
                        minimums_k[index]:maximums_k[index],
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ] == object_number
                                     )
                if self.neighbors_are_objects:
                    extended = scind.binary_dilation(
                        (patch != 0) & (patch != object_number),
                        strel_touching)
                else:
                    extended = scind.binary_dilation((npatch != 0),
                                                     strel_touching)
                overlap = np.sum(outline_patch & extended)
                pixel_count[index] = overlap
            if sum([len(x) for x in first_objects]) > 0:
                first_objects = np.hstack(first_objects)
                reverse_object_numbers = np.zeros(
                    max(np.max(object_numbers), np.max(first_objects)) + 1,
                    int)
                reverse_object_numbers[object_numbers] = (
                    np.arange(len(object_numbers)) + 1)
                first_objects = reverse_object_numbers[first_objects]

                second_objects = np.hstack(second_objects)
                reverse_neighbor_numbers = np.zeros(
                    max(np.max(neighbor_numbers), np.max(second_objects)) + 1,
                    int)
                reverse_neighbor_numbers[neighbor_numbers] = (
                    np.arange(len(neighbor_numbers)) + 1)
                second_objects = reverse_neighbor_numbers[second_objects]
                to_keep = (first_objects > 0) & (second_objects > 0)
                first_objects = first_objects[to_keep]
                second_objects = second_objects[to_keep]
            else:
                first_objects = np.zeros(0, int)
                second_objects = np.zeros(0, int)
            percent_touching = pixel_count * 100 / perimeters
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            #
            # Have to recompute nearest
            #
            first_object_number = np.zeros(nkept_objects, int)
            second_object_number = np.zeros(nkept_objects, int)
            if nkept_objects > (1 if self.neighbors_are_objects else 0):
                di = (ocenters[object_indexes[:, np.newaxis], 0] -
                      ncenters[neighbor_indexes[np.newaxis, :], 0])
                dj = (ocenters[object_indexes[:, np.newaxis], 1] -
                      ncenters[neighbor_indexes[np.newaxis, :], 1])
                distance_matrix = np.sqrt(di * di + dj * dj)
                distance_matrix[~has_pixels, :] = np.inf
                distance_matrix[:, ~neighbor_has_pixels] = np.inf
                #
                # order[:,0] should be arange(nobjects)
                # order[:,1] should be the nearest neighbor
                # order[:,2] should be the next nearest neighbor
                #
                order = np.lexsort([distance_matrix
                                    ]).astype(first_object_number.dtype)
                if self.neighbors_are_objects:
                    first_object_number[has_pixels] = order[has_pixels, 1] + 1
                    if nkept_objects > 2:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 2] + 1
                else:
                    first_object_number[has_pixels] = order[has_pixels, 0] + 1
                    if order.shape[1] > 1:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 1] + 1
        else:
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            first_objects = np.zeros(0, int)
            second_objects = np.zeros(0, int)
        #
        # Now convert all measurements from the small-removed to
        # the final number set.
        #
        neighbor_count = neighbor_count[object_indexes]
        neighbor_count[~has_pixels] = 0
        percent_touching = percent_touching[object_indexes]
        percent_touching[~has_pixels] = 0
        first_x_vector = first_x_vector[object_indexes]
        second_x_vector = second_x_vector[object_indexes]
        first_y_vector = first_y_vector[object_indexes]
        second_y_vector = second_y_vector[object_indexes]
        angle = angle[object_indexes]
        #
        # Record the measurements
        #
        assert isinstance(workspace, cpw.Workspace)
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        image_set = workspace.image_set
        features_and_data = [
            (M_NUMBER_OF_NEIGHBORS, neighbor_count),
            (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
            (
                M_FIRST_CLOSEST_DISTANCE,
                np.sqrt(first_x_vector**2 + first_y_vector**2),
            ),
            (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
            (
                M_SECOND_CLOSEST_DISTANCE,
                np.sqrt(second_x_vector**2 + second_y_vector**2),
            ),
            (M_ANGLE_BETWEEN_NEIGHBORS, angle),
            (M_PERCENT_TOUCHING, percent_touching),
        ]
        for feature_name, data in features_and_data:
            m.add_measurement(self.object_name.value,
                              self.get_measurement_name(feature_name), data)
        if len(first_objects) > 0:
            m.add_relate_measurement(
                self.module_num,
                cpmeas.NEIGHBORS,
                self.object_name.value,
                self.object_name.value
                if self.neighbors_are_objects else self.neighbors_name.value,
                m.image_set_number * np.ones(first_objects.shape, int),
                first_objects,
                m.image_set_number * np.ones(second_objects.shape, int),
                second_objects,
            )

        labels = kept_labels

        neighbor_count_image = np.zeros(labels.shape, int)
        object_mask = objects.segmented != 0
        object_indexes = objects.segmented[object_mask] - 1
        neighbor_count_image[object_mask] = neighbor_count[object_indexes]
        workspace.display_data.neighbor_count_image = neighbor_count_image

        percent_touching_image = np.zeros(labels.shape)
        percent_touching_image[object_mask] = percent_touching[object_indexes]
        workspace.display_data.percent_touching_image = percent_touching_image

        image_set = workspace.image_set
        if self.wants_count_image.value:
            neighbor_cm_name = self.count_colormap.value
            neighbor_cm = get_colormap(neighbor_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=neighbor_cm)
            img = sm.to_rgba(neighbor_count_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            count_image = cpi.Image(img, masking_objects=objects)
            image_set.add(self.count_image_name.value, count_image)
        else:
            neighbor_cm_name = cpprefs.get_default_colormap()
            neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
        if self.wants_percent_touching_image:
            percent_touching_cm_name = self.touching_colormap.value
            percent_touching_cm = get_colormap(percent_touching_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=percent_touching_cm)
            img = sm.to_rgba(percent_touching_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            touching_image = cpi.Image(img, masking_objects=objects)
            image_set.add(self.touching_image_name.value, touching_image)
        else:
            percent_touching_cm_name = cpprefs.get_default_colormap()
            percent_touching_cm = matplotlib.cm.get_cmap(
                percent_touching_cm_name)

        if self.show_window:
            workspace.display_data.neighbor_cm_name = neighbor_cm_name
            workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.neighbor_labels = neighbor_labels
            workspace.display_data.expanded_labels = expanded_labels
            workspace.display_data.object_mask = object_mask
            workspace.display_data.dimensions = dimensions
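The heart of the per-object loop above is a dilate-and-intersect test. A stripped-down sketch of just that step, with skimage's disk standing in for centrosome's strel_disk and a toy label image:

    import numpy as np
    from scipy import ndimage as scind
    from skimage.morphology import disk

    labels = np.zeros((32, 32), int)
    labels[4:12, 4:12] = 1
    labels[4:12, 14:22] = 2          # object 2 sits 2 px to the right of object 1

    distance = 3
    strel = disk(distance)

    # Dilate object 1's mask and see which other labels the dilation now touches.
    patch_mask = labels == 1
    extended = scind.binary_dilation(patch_mask, strel)
    neighbors = np.unique(labels[extended])
    neighbors = neighbors[(neighbors != 0) & (neighbors != 1)]
    print(neighbors)                 # [2]: object 2 is within "distance" pixels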
Example #11
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)

        alpha = numpy.zeros(objects.shape)

        convert = True

        if self.image_mode == "Binary (black & white)":
            pixel_data = numpy.zeros(objects.shape, bool)
        elif self.image_mode == "Grayscale":
            pixel_data = numpy.zeros(objects.shape)
        elif self.image_mode == "uint16":
            pixel_data = numpy.zeros(objects.shape, numpy.int32)
            convert = False
        else:
            pixel_data = numpy.zeros(objects.shape + (3, ))

        for labels, _ in objects.get_labels():
            mask = labels != 0

            if numpy.all(~mask):
                continue

            if self.image_mode == "Binary (black & white)":
                pixel_data[mask] = True

                alpha[mask] = 1
            elif self.image_mode == "Grayscale":
                pixel_data[mask] = labels[mask].astype(float) / numpy.max(
                    labels)

                alpha[mask] = 1
            elif self.image_mode == "Color":
                if self.colormap.value == DEFAULT_COLORMAP:
                    cm_name = get_default_colormap()
                elif self.colormap.value == "colorcube":
                    # Colorcube missing from matplotlib
                    cm_name = "gist_rainbow"
                elif self.colormap.value == "lines":
                    # Lines missing from matplotlib and nothing much like it;
                    # pretty boring palette anyway, hence
                    cm_name = "Pastel1"
                elif self.colormap.value == "white":
                    # White missing from matplotlib, it's just a colormap
                    # of all completely white... not even different kinds of
                    # white. And, isn't white just a uniform sampling of
                    # frequencies from the spectrum?
                    cm_name = "Spectral"
                else:
                    cm_name = self.colormap.value

                cm = matplotlib.cm.get_cmap(cm_name)

                mapper = matplotlib.cm.ScalarMappable(cmap=cm)

                if labels.ndim == 3:
                    for index, plane in enumerate(mask):
                        pixel_data[index, plane, :] = mapper.to_rgba(
                            centrosome.cpmorphology.distance_color_labels(
                                labels[index]))[plane, :3]
                else:
                    pixel_data[mask, :] += mapper.to_rgba(
                        centrosome.cpmorphology.distance_color_labels(labels))[
                            mask, :3]

                alpha[mask] += 1
            elif self.image_mode == "uint16":
                pixel_data[mask] = labels[mask]

                alpha[mask] = 1

        mask = alpha > 0

        if self.image_mode == "Color":
            pixel_data[
                mask, :] = pixel_data[mask, :] / alpha[mask][:, numpy.newaxis]
        elif self.image_mode != "Binary (black & white)":
            pixel_data[mask] = pixel_data[mask] / alpha[mask]

        image = Image(
            pixel_data,
            parent_image=objects.parent_image,
            convert=convert,
            dimensions=len(objects.shape),
        )

        workspace.image_set.add(self.image_name.value, image)

        if self.show_window:
            if image.dimensions == 2:
                workspace.display_data.ijv = objects.ijv
            else:
                workspace.display_data.segmented = objects.segmented

            workspace.display_data.pixel_data = pixel_data

            workspace.display_data.dimensions = image.dimensions
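The "Color" branch amounts to color-mapping a label matrix and zeroing the background; the centrosome distance_color_labels recoloring is a refinement on top of that. A minimal sketch without it, using an arbitrary colormap:

    import numpy as np
    import matplotlib.cm
    import matplotlib.pyplot as plt

    labels = np.zeros((64, 64), int)
    labels[8:24, 8:24] = 1
    labels[30:50, 30:50] = 2

    mask = labels != 0
    mapper = matplotlib.cm.ScalarMappable(cmap=plt.get_cmap("jet"))

    pixel_data = np.zeros(labels.shape + (3,))
    pixel_data[mask, :] = mapper.to_rgba(labels)[mask, :3]   # drop the alpha channel

    plt.imshow(pixel_data)
    plt.show()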