def record_measurement(self, workspace, image_name, object_name, scale,
                           feature_name, result):
        """Record the result of a measurement in the workspace's
        measurements"""
        data = fix(result)
        data[~np.isfinite(data)] = 0

        if scale == "-":
            workspace.add_measurement(
                object_name, "%s_%s_%s" % (TEXTURE, feature_name, image_name),
                data)
            statistics = [[
                image_name, object_name, feature_name, scale,
                "%f" % (d) if len(data) else "-"
            ] for d in data]
        else:
            workspace.add_measurement(
                object_name, "%s_%s_%s_%s" %
                (TEXTURE, feature_name, image_name, str(scale)), data)
            statistics = [[
                image_name, object_name,
                "%s %s" % (aggregate_name, feature_name), scale,
                "%.2f" % fn(data) if len(data) else "-"
            ] for aggregate_name, fn in (("min", np.min), ("max", np.max),
                                         ("mean", np.mean),
                                         ("median", np.median), ("std dev",
                                                                 np.std))]
        return statistics
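
The statistics rows returned above follow a simple pattern: each aggregate (min, max, mean, median, std dev) is computed over the per-object data and formatted, or replaced by "-" when no objects were measured. A minimal stand-alone sketch of that pattern, using plain NumPy and hypothetical image/object/feature names:

import numpy as np

# Hypothetical per-object texture values for one image/object pair.
data = np.array([0.12, 0.47, 0.33, np.nan])
data[~np.isfinite(data)] = 0  # zero out NaN/inf, as record_measurement does

aggregates = (("min", np.min), ("max", np.max), ("mean", np.mean),
              ("median", np.median), ("std dev", np.std))
statistics = [["MyImage", "Nuclei", "%s AngularSecondMoment" % name, 3,
               "%.2f" % fn(data) if len(data) else "-"]
              for name, fn in aggregates]
# Each row: [image, object, "aggregate feature", scale, formatted value]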
Example #2
    def run(self, workspace):
        parents = workspace.object_set.get_objects(self.parent_name.value)
        children = workspace.object_set.get_objects(self.sub_object_name.value)
        child_count, parents_of = parents.relate_children(children)
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        m.add_measurement(self.sub_object_name.value,
                          FF_PARENT % (self.parent_name.value), parents_of)
        m.add_measurement(self.parent_name.value,
                          FF_CHILDREN_COUNT % (self.sub_object_name.value),
                          child_count)
        good_parents = parents_of[parents_of != 0]
        image_numbers = np.ones(len(good_parents), int) * m.image_set_number
        good_children = np.argwhere(parents_of != 0).flatten() + 1
        if np.any(good_parents):
            m.add_relate_measurement(self.module_num, R_PARENT,
                                     self.parent_name.value,
                                     self.sub_object_name.value, image_numbers,
                                     good_parents, image_numbers,
                                     good_children)
            m.add_relate_measurement(self.module_num, R_CHILD,
                                     self.sub_object_name.value,
                                     self.parent_name.value, image_numbers,
                                     good_children, image_numbers,
                                     good_parents)
        parent_names = self.get_parent_names()
        for parent_name in parent_names:
            if self.find_parent_child_distances in (D_BOTH, D_CENTROID):
                self.calculate_centroid_distances(workspace, parent_name)
            if self.find_parent_child_distances in (D_BOTH, D_MINIMUM):
                self.calculate_minimum_distances(workspace, parent_name)

        if self.wants_per_parent_means.value:
            parent_indexes = np.arange(np.max(parents.segmented)) + 1
            for feature_name in m.get_feature_names(
                    self.sub_object_name.value):
                if not self.should_aggregate_feature(feature_name):
                    continue
                data = m.get_current_measurement(self.sub_object_name.value,
                                                 feature_name)
                if data is not None and len(data) > 0:
                    if len(parents_of) > 0:
                        means = fix(
                            scind.mean(data.astype(float), parents_of,
                                       parent_indexes))
                    else:
                        means = np.zeros((0, ))
                else:
                    # No child measurements - all NaN
                    means = np.ones(len(parents_of)) * np.nan
                mean_feature_name = FF_MEAN % (self.sub_object_name.value,
                                               feature_name)
                m.add_measurement(self.parent_name.value, mean_feature_name,
                                  means)

        if self.show_window:
            workspace.display_data.parent_labels = parents.segmented
            workspace.display_data.parent_count = parents.count
            workspace.display_data.child_labels = children.segmented
            workspace.display_data.parents_of = parents_of
 def run_one_gabor(self, image_name, object_name, scale, workspace):
     objects = workspace.get_objects(object_name)
     labels = objects.segmented
     object_count = np.max(labels)
     if object_count > 0:
         image = workspace.image_set.get_image(image_name,
                                               must_be_grayscale=True)
         pixel_data = image.pixel_data
         labels = objects.segmented
         if image.has_mask:
             mask = image.mask
         else:
             mask = None
         try:
             pixel_data = objects.crop_image_similarly(pixel_data)
             if mask is not None:
                 mask = objects.crop_image_similarly(mask)
                 labels[~mask] = 0
         except ValueError:
             pixel_data, m1 = size_similarly(labels, pixel_data)
             labels[~m1] = 0
             if mask is not None:
                 mask, m2 = size_similarly(labels, mask)
                 labels[~m2] = 0
                 labels[~mask] = 0
         pixel_data = normalized_per_object(pixel_data, labels)
         best_score = np.zeros((object_count,))
         for angle in range(self.gabor_angles.value):
             theta = np.pi * angle / self.gabor_angles.value
             g = gabor(pixel_data, labels, scale, theta)
             score_r = fix(scind.sum(g.real, labels,
                                      np.arange(object_count, dtype=np.int32)+ 1))
             score_i = fix(scind.sum(g.imag, labels,
                                      np.arange(object_count, dtype=np.int32)+ 1))
             score = np.sqrt(score_r**2+score_i**2)
             best_score = np.maximum(best_score, score)
     else:
         best_score = np.zeros((0,))
     statistics = self.record_measurement(workspace, 
                                          image_name, 
                                          object_name, 
                                          scale,
                                          F_GABOR, 
                                          best_score)
     return statistics
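
A rough, hedged sketch of the same per-object Gabor scoring using only NumPy, SciPy and scikit-image; the module above uses its own gabor() helper and per-object normalization, so skimage.filters.gabor stands in here and frequency = 1/scale is an assumption:

import numpy as np
import scipy.ndimage as scind
from skimage.filters import gabor  # stand-in for the module's gabor() helper

def gabor_best_score(pixel_data, labels, scale, n_angles=4):
    # Best per-object Gabor magnitude over n_angles orientations.
    object_count = int(labels.max())
    if object_count == 0:
        return np.zeros((0,))
    indexes = np.arange(object_count, dtype=np.int32) + 1
    best_score = np.zeros((object_count,))
    for angle in range(n_angles):
        theta = np.pi * angle / n_angles
        real, imag = gabor(pixel_data, frequency=1.0 / scale, theta=theta)
        score_r = scind.sum(real, labels, indexes)
        score_i = scind.sum(imag, labels, indexes)
        best_score = np.maximum(best_score, np.sqrt(score_r ** 2 + score_i ** 2))
    return best_score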
Example #4
 def run_one_gabor(self, image_name, object_name, scale, workspace):
     objects = workspace.get_objects(object_name)
     labels = objects.segmented
     object_count = np.max(labels)
     if object_count > 0:
         image = workspace.image_set.get_image(image_name,
                                               must_be_grayscale=True)
         pixel_data = image.pixel_data
         labels = objects.segmented
         if image.has_mask:
             mask = image.mask
         else:
             mask = None
         try:
             pixel_data = objects.crop_image_similarly(pixel_data)
             if mask is not None:
                 mask = objects.crop_image_similarly(mask)
                 labels[~mask] = 0
         except ValueError:
             pixel_data, m1 = cpo.size_similarly(labels, pixel_data)
             labels[~m1] = 0
             if mask is not None:
                 mask, m2 = cpo.size_similarly(labels, mask)
                 labels[~m2] = 0
                 labels[~mask] = 0
         pixel_data = normalized_per_object(pixel_data, labels)
         best_score = np.zeros((object_count,))
         for angle in range(self.gabor_angles.value):
             theta = np.pi * angle / self.gabor_angles.value
             g = gabor(pixel_data, labels, scale, theta)
             score_r = fix(scind.sum(g.real, labels,
                                     np.arange(object_count, dtype=np.int32) + 1))
             score_i = fix(scind.sum(g.imag, labels,
                                     np.arange(object_count, dtype=np.int32) + 1))
             score = np.sqrt(score_r ** 2 + score_i ** 2)
             best_score = np.maximum(best_score, score)
     else:
         best_score = np.zeros((0,))
     statistics = self.record_measurement(workspace,
                                          image_name,
                                          object_name,
                                          scale,
                                          F_GABOR,
                                          best_score)
     return statistics
Example #5
 def record_measurement(self, workspace, object_name, feature_name, result):
     """Record the result of a measurement in the workspace's measurements"""
     data = fix(result)
     workspace.add_measurement(object_name,
                               "%s_%s" % (AREA_SHAPE, feature_name), data)
     if self.show_window and np.any(np.isfinite(data)) > 0:
         data = data[np.isfinite(data)]
         workspace.display_data.statistics.append(
             (object_name, feature_name, "%.2f" % np.mean(data),
              "%.2f" % np.median(data), "%.2f" % np.std(data)))
Example #6
 def __init__(self, name):
     self.name = name
     self.labels = workspace.object_set.get_objects(name).segmented
     self.nobjects = np.max(self.labels)
     if self.nobjects != 0:
         self.range = np.arange(1, np.max(self.labels) + 1)
         self.labels = self.labels.copy()
         self.labels[~im.mask] = 0
         self.current_mean = fix(
             scind.mean(im.pixel_data, self.labels, self.range))
         self.start_mean = np.maximum(self.current_mean,
                                      np.finfo(float).eps)
Example #7
 def record_measurement(self,workspace,  
                        object_name, feature_name, result):
     """Record the result of a measurement in the workspace's measurements"""
     data = fix(result)
     workspace.add_measurement(object_name, 
                               "%s_%s"%(AREA_SHAPE,feature_name), 
                               data)
     if self.show_window and np.any(np.isfinite(data)) > 0:
         data = data[np.isfinite(data)]
         workspace.display_data.statistics.append(
             (object_name, feature_name, 
              "%.2f"%np.mean(data),
              "%.2f"%np.median(data),
              "%.2f"%np.std(data)))
Example #8
 def record_measurement(self, workspace, image_name, object_name,
                        feature_name, result):
     """Record the result of a measurement in the workspace's
     measurements"""
     data = fix(result)
     data[~np.isfinite(data)] = 0
     workspace.add_measurement(
         object_name, "%s_%s_%s" % (HISTOGRAM, feature_name, image_name),
         data)
     statistics = [[
         image_name, object_name, feature_name,
         "%f" % (d) if len(data) else "-"
     ] for d in data]
     return statistics
Example #9
 def __init__(self, name):
     self.name = name
     self.labels = workspace.object_set.get_objects(name).segmented
     self.nobjects = np.max(self.labels)
     if self.nobjects != 0:
         self.range = np.arange(1, np.max(self.labels) + 1)
         self.labels = self.labels.copy()
         self.labels[~ im.mask] = 0
         self.current_mean = fix(
                 scind.mean(im.pixel_data,
                            self.labels,
                            self.range))
         self.start_mean = np.maximum(
                 self.current_mean, np.finfo(float).eps)
Example #10
 def record_measurement(self, workspace,
                        image_name, object_name, scale,
                        feature_name, result):
     """Record the result of a measurement in the workspace's
     measurements"""
     data = fix(result)
     data[~np.isfinite(data)] = 0
     workspace.add_measurement(
             object_name,
             "%s_%s_%s_%s" % (TEXTURE, feature_name, image_name, str(scale)),
             data)
     statistics = [[image_name, object_name,
                    "%s %s" % (aggregate_name, feature_name), scale,
                    "%.2f" % fn(data) if len(data) else "-"]
                   for aggregate_name, fn in (("min", np.min),
                                              ("max", np.max),
                                              ("mean", np.mean),
                                              ("median", np.median),
                                              ("std dev", np.std))]
     return statistics
Example #11
    def run(self, workspace):
        '''Run the module on the image set'''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(
                skeleton_name, must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except ValueError:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels,
                                     footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:
            def size_fn(area, is_object):
                return (~ is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(
                    combined_skel, ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1, :-1] & combined_skel[1:, :-1] &
                    combined_skel[:-1, 1:] & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # emanating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(scind.sum(branching_counts, nearby_labels,
                                         label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0,), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(scind.sum(branch_points, outside_labels,
                                          label_range))
        else:
            branch_counts = np.zeros((0,), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels, label_range))
        else:
            end_counts = np.zeros((0,), int)
        #
        # Calculate the distances
        #
        total_distance = morph.skeleton_length(
                dlabels * outside_skel, label_range)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_NEURON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join((C_NEURON, F_NUMBER_NON_TRUNK_BRANCHES,
                            skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join((C_NEURON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        feature = "_".join((C_NEURON, F_TOTAL_NEURITE_LENGTH, skeleton_name))
        m[seed_objects_name, feature] = total_distance
        #
        # Collect the graph information
        #
        if self.wants_neuron_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                    self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_neuron_graph(
                    combined_skel, dlabels,
                    trunk_mask,
                    branch_points & ~trunk_mask,
                    end_points,
                    intensity_image.pixel_data)

            image_number = workspace.measurements.image_set_number

            edge_path, vertex_path = self.get_graph_file_paths(m, image_number)
            workspace.interaction_request(
                    self, image_number, edge_path, edge_graph,
                    vertex_path, vertex_graph, headless_ok=True)

            if self.show_window:
                workspace.display_data.edge_graph = edge_graph
                workspace.display_data.vertex_graph = vertex_graph
                workspace.display_data.intensity_image = intensity_image.pixel_data
        #
        # Make the display image
        #
        if self.show_window or self.wants_branchpoint_image:
            branchpoint_image = np.zeros((skeleton.shape[0],
                                          skeleton.shape[1],
                                          3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= .875
            branchpoint_image[dilated_labels != 0, :] += .1
            if self.show_window:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image,
                               parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
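
The trunk/branch/end classification above relies on morph.branchpoints and morph.endpoints. A simplified, hedged stand-in that ignores the diagonal "odd case" handled explicitly above simply counts 8-connected neighbours on the skeleton:

import numpy as np
from scipy.ndimage import convolve
from skimage.morphology import skeletonize

def branch_and_end_points(binary):
    # Branch points have 3+ skeleton neighbours, end points exactly 1.
    skel = skeletonize(binary)
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]])
    neighbours = convolve(skel.astype(int), kernel, mode="constant")
    branch_points = skel & (neighbours >= 3)
    end_points = skel & (neighbours == 1)
    return branch_points, end_points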
Example #12
 def run_image_pair_objects(self, workspace, first_image_name,
                            second_image_name, object_name):
     '''Calculate per-object correlations between intensities in two images'''
     first_image = workspace.image_set.get_image(first_image_name,
                                                 must_be_grayscale=True)
     second_image = workspace.image_set.get_image(second_image_name,
                                                  must_be_grayscale=True)
     objects = workspace.object_set.get_objects(object_name)
     #
     # Crop both images to the size of the labels matrix
     #
     labels = objects.segmented
     try:
         first_pixels  = objects.crop_image_similarly(first_image.pixel_data)
         first_mask    = objects.crop_image_similarly(first_image.mask)
     except ValueError:
         first_pixels, m1 = cpo.size_similarly(labels, first_image.pixel_data)
         first_mask, m1 = cpo.size_similarly(labels, first_image.mask)
         first_mask[~m1] = False
     try:
         second_pixels = objects.crop_image_similarly(second_image.pixel_data)
         second_mask   = objects.crop_image_similarly(second_image.mask)
     except ValueError:
         second_pixels, m1 = cpo.size_similarly(labels, second_image.pixel_data)
         second_mask, m1 = cpo.size_similarly(labels, second_image.mask)
         second_mask[~m1] = False
     mask   = ((labels > 0) & first_mask & second_mask)
     first_pixels = first_pixels[mask]
     second_pixels = second_pixels[mask]
     labels = labels[mask]
     n_objects = objects.count
     if n_objects == 0:
         corr = np.zeros((0,))
     else:
          #
          # The per-object correlation is
          #   sum((x - mean(x)) * (y - mean(y))) / ((n - 1) * std(x) * std(y))
          #
         lrange = np.arange(n_objects,dtype=np.int32)+1
         area  = fix(scind.sum(np.ones_like(labels), labels, lrange))
         mean1 = fix(scind.mean(first_pixels, labels, lrange))
         mean2 = fix(scind.mean(second_pixels, labels, lrange))
         #
         # Calculate the standard deviation times the population.
         #
         std1 = np.sqrt(fix(scind.sum((first_pixels-mean1[labels-1])**2,
                                      labels, lrange)))
         std2 = np.sqrt(fix(scind.sum((second_pixels-mean2[labels-1])**2,
                                      labels, lrange)))
         x = first_pixels - mean1[labels-1]  # x - mean(x)
         y = second_pixels - mean2[labels-1] # y - mean(y)
         corr = fix(scind.sum(x * y / (std1[labels-1] * std2[labels-1]),
                              labels, lrange))
         # Explicitly set the correlation to NaN for masked objects
         corr[scind.sum(1, labels, lrange) == 0] = np.NaN
     measurement = ("Correlation_Correlation_%s_%s" %
                    (first_image_name, second_image_name))
     workspace.measurements.add_measurement(object_name, measurement, corr)
     if n_objects == 0:
         return [[first_image_name, second_image_name, object_name,
                  "Mean correlation","-"],
                 [first_image_name, second_image_name, object_name,
                  "Median correlation","-"],
                 [first_image_name, second_image_name, object_name,
                  "Min correlation","-"],
                 [first_image_name, second_image_name, object_name,
                  "Max correlation","-"]]
     else:
         return [[first_image_name, second_image_name, object_name,
                  "Mean correlation","%.2f"%np.mean(corr)],
                 [first_image_name, second_image_name, object_name,
                  "Median correlation","%.2f"%np.median(corr)],
                 [first_image_name, second_image_name, object_name,
                  "Min correlation","%.2f"%np.min(corr)],
                 [first_image_name, second_image_name, object_name,
                  "Max correlation","%.2f"%np.max(corr)]]
Example #13
def minimum(input, labels, index):
    return fix(scind.minimum(input, labels, index))
Example #14
    def run(self, workspace):
        if self.show_window:
            workspace.display_data.col_labels = ("Image", "Object", "Feature", "Mean", "Median", "STD")
            workspace.display_data.statistics = statistics = []
        for image_name in [img.name for img in self.images]:
            image = workspace.image_set.get_image(image_name.value, must_be_grayscale=True)
            for object_name in [obj.name for obj in self.objects]:
                # Need to refresh image after each iteration...
                img = image.pixel_data
                if image.has_mask:
                    masked_image = img.copy()
                    masked_image[~image.mask] = 0
                else:
                    masked_image = img
                objects = workspace.object_set.get_objects(object_name.value)
                nobjects = objects.count
                integrated_intensity = np.zeros((nobjects,))
                integrated_intensity_edge = np.zeros((nobjects,))
                mean_intensity = np.zeros((nobjects,))
                mean_intensity_edge = np.zeros((nobjects,))
                std_intensity = np.zeros((nobjects,))
                std_intensity_edge = np.zeros((nobjects,))
                min_intensity = np.zeros((nobjects,))
                min_intensity_edge = np.zeros((nobjects,))
                max_intensity = np.zeros((nobjects,))
                max_intensity_edge = np.zeros((nobjects,))
                mass_displacement = np.zeros((nobjects,))
                lower_quartile_intensity = np.zeros((nobjects,))
                median_intensity = np.zeros((nobjects,))
                mad_intensity = np.zeros((nobjects,))
                upper_quartile_intensity = np.zeros((nobjects,))
                cmi_x = np.zeros((nobjects,))
                cmi_y = np.zeros((nobjects,))
                max_x = np.zeros((nobjects,))
                max_y = np.zeros((nobjects,))
                for labels, lindexes in objects.get_labels():
                    lindexes = lindexes[lindexes != 0]
                    labels, img = cpo.crop_labels_and_image(labels, img)
                    _, masked_image = cpo.crop_labels_and_image(labels, masked_image)
                    outlines = cpmo.outline(labels)

                    if image.has_mask:
                        _, mask = cpo.crop_labels_and_image(labels, image.mask)
                        masked_labels = labels.copy()
                        masked_labels[~mask] = 0
                        masked_outlines = outlines.copy()
                        masked_outlines[~mask] = 0
                    else:
                        masked_labels = labels
                        masked_outlines = outlines

                    lmask = (masked_labels > 0) & np.isfinite(img)  # Ignore NaNs, Infs
                    has_objects = np.any(lmask)
                    if has_objects:
                        limg = img[lmask]
                        llabels = labels[lmask]
                        mesh_y, mesh_x = np.mgrid[0 : masked_image.shape[0], 0 : masked_image.shape[1]]
                        mesh_x = mesh_x[lmask]
                        mesh_y = mesh_y[lmask]
                        lcount = fix(nd.sum(np.ones(len(limg)), llabels, lindexes))
                        integrated_intensity[lindexes - 1] = fix(nd.sum(limg, llabels, lindexes))
                        mean_intensity[lindexes - 1] = integrated_intensity[lindexes - 1] / lcount
                        std_intensity[lindexes - 1] = np.sqrt(
                            fix(nd.mean((limg - mean_intensity[llabels - 1]) ** 2, llabels, lindexes))
                        )
                        min_intensity[lindexes - 1] = fix(nd.minimum(limg, llabels, lindexes))
                        max_intensity[lindexes - 1] = fix(nd.maximum(limg, llabels, lindexes))
                        # Compute the position of the intensity maximum
                        max_position = np.array(fix(nd.maximum_position(limg, llabels, lindexes)), dtype=int)
                        max_position = np.reshape(max_position, (max_position.shape[0],))
                        max_x[lindexes - 1] = mesh_x[max_position]
                        max_y[lindexes - 1] = mesh_y[max_position]
                        # The mass displacement is the distance between the center
                        # of mass of the binary image and of the intensity image. The
                        # center of mass is the average X or Y for the binary image
                        # and the sum of X or Y * intensity / integrated intensity
                        cm_x = fix(nd.mean(mesh_x, llabels, lindexes))
                        cm_y = fix(nd.mean(mesh_y, llabels, lindexes))

                        i_x = fix(nd.sum(mesh_x * limg, llabels, lindexes))
                        i_y = fix(nd.sum(mesh_y * limg, llabels, lindexes))
                        cmi_x[lindexes - 1] = i_x / integrated_intensity[lindexes - 1]
                        cmi_y[lindexes - 1] = i_y / integrated_intensity[lindexes - 1]
                        diff_x = cm_x - cmi_x[lindexes - 1]
                        diff_y = cm_y - cmi_y[lindexes - 1]
                        mass_displacement[lindexes - 1] = np.sqrt(diff_x * diff_x + diff_y * diff_y)
                        #
                        # Sort the intensities by label, then intensity.
                        # For each label, find the index above and below
                        # the 25%, 50% and 75% mark and take the weighted
                        # average.
                        #
                        order = np.lexsort((limg, llabels))
                        areas = lcount.astype(int)
                        indices = np.cumsum(areas) - areas
                        for dest, fraction in (
                            (lower_quartile_intensity, 1.0 / 4.0),
                            (median_intensity, 1.0 / 2.0),
                            (upper_quartile_intensity, 3.0 / 4.0),
                        ):
                            qindex = indices.astype(float) + areas * fraction
                            qfraction = qindex - np.floor(qindex)
                            qindex = qindex.astype(int)
                            qmask = qindex < indices + areas - 1
                            qi = qindex[qmask]
                            qf = qfraction[qmask]
                            dest[lindexes[qmask] - 1] = limg[order[qi]] * (1 - qf) + limg[order[qi + 1]] * qf
                            #
                            # In some situations (e.g. only 3 points), there may
                            # not be an upper bound.
                            #
                            qmask = (~qmask) & (areas > 0)
                            dest[lindexes[qmask] - 1] = limg[order[qindex[qmask]]]
                        #
                        # Once again, for the MAD
                        #
                        madimg = np.abs(limg - median_intensity[llabels - 1])
                        order = np.lexsort((madimg, llabels))
                        qindex = indices.astype(float) + areas / 2.0
                        qfraction = qindex - np.floor(qindex)
                        qindex = qindex.astype(int)
                        qmask = qindex < indices + areas - 1
                        qi = qindex[qmask]
                        qf = qfraction[qmask]
                        mad_intensity[lindexes[qmask] - 1] = madimg[order[qi]] * (1 - qf) + madimg[order[qi + 1]] * qf
                        qmask = (~qmask) & (areas > 0)
                        mad_intensity[lindexes[qmask] - 1] = madimg[order[qindex[qmask]]]

                    emask = masked_outlines > 0
                    eimg = img[emask]
                    elabels = labels[emask]
                    has_edge = len(eimg) > 0
                    if has_edge:
                        ecount = fix(nd.sum(np.ones(len(eimg)), elabels, lindexes))
                        integrated_intensity_edge[lindexes - 1] = fix(nd.sum(eimg, elabels, lindexes))
                        mean_intensity_edge[lindexes - 1] = integrated_intensity_edge[lindexes - 1] / ecount
                        std_intensity_edge[lindexes - 1] = np.sqrt(
                            fix(nd.mean((eimg - mean_intensity_edge[elabels - 1]) ** 2, elabels, lindexes))
                        )
                        min_intensity_edge[lindexes - 1] = fix(nd.minimum(eimg, elabels, lindexes))
                        max_intensity_edge[lindexes - 1] = fix(nd.maximum(eimg, elabels, lindexes))
                m = workspace.measurements
                for category, feature_name, measurement in (
                    (INTENSITY, INTEGRATED_INTENSITY, integrated_intensity),
                    (INTENSITY, MEAN_INTENSITY, mean_intensity),
                    (INTENSITY, STD_INTENSITY, std_intensity),
                    (INTENSITY, MIN_INTENSITY, min_intensity),
                    (INTENSITY, MAX_INTENSITY, max_intensity),
                    (INTENSITY, INTEGRATED_INTENSITY_EDGE, integrated_intensity_edge),
                    (INTENSITY, MEAN_INTENSITY_EDGE, mean_intensity_edge),
                    (INTENSITY, STD_INTENSITY_EDGE, std_intensity_edge),
                    (INTENSITY, MIN_INTENSITY_EDGE, min_intensity_edge),
                    (INTENSITY, MAX_INTENSITY_EDGE, max_intensity_edge),
                    (INTENSITY, MASS_DISPLACEMENT, mass_displacement),
                    (INTENSITY, LOWER_QUARTILE_INTENSITY, lower_quartile_intensity),
                    (INTENSITY, MEDIAN_INTENSITY, median_intensity),
                    (INTENSITY, MAD_INTENSITY, mad_intensity),
                    (INTENSITY, UPPER_QUARTILE_INTENSITY, upper_quartile_intensity),
                    (C_LOCATION, LOC_CMI_X, cmi_x),
                    (C_LOCATION, LOC_CMI_Y, cmi_y),
                    (C_LOCATION, LOC_MAX_X, max_x),
                    (C_LOCATION, LOC_MAX_Y, max_y),
                ):
                    measurement_name = "%s_%s_%s" % (category, feature_name, image_name.value)
                    m.add_measurement(object_name.value, measurement_name, measurement)
                    if self.show_window and len(measurement) > 0:
                        statistics.append(
                            (
                                image_name.value,
                                object_name.value,
                                feature_name,
                                np.round(np.mean(measurement), 3),
                                np.round(np.median(measurement), 3),
                                np.round(np.std(measurement), 3),
                            )
                        )
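
The quartile/median/MAD block above uses a lexsort-and-cumsum trick to get a linearly interpolated per-label quantile without looping over objects. A compact sketch of just that trick, assuming values and labels are flat arrays over the in-object pixels and every label 1..K occurs at least once:

import numpy as np

def per_label_quantile(values, labels, fraction):
    order = np.lexsort((values, labels))   # sort by label, then by value
    counts = np.bincount(labels)[1:]       # pixels per label, labels 1..K
    starts = np.cumsum(counts) - counts    # first sorted slot of each label
    qindex = starts.astype(float) + counts * fraction
    qfrac = qindex - np.floor(qindex)
    qindex = qindex.astype(int)
    result = np.zeros(len(counts))
    has_upper = qindex < starts + counts - 1   # room to interpolate upward
    qi = qindex[has_upper]
    result[has_upper] = (values[order[qi]] * (1 - qfrac[has_upper]) +
                         values[order[qi + 1]] * qfrac[has_upper])
    # Tiny objects with no upper bound just take the single bounding value.
    result[~has_upper] = values[order[qindex[~has_upper]]]
    return result

Under those assumptions, per_label_quantile(limg, llabels, 0.5) would give the per-object median that the MAD computation reuses.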
Example #15
def cooccurrence(quantized_image, labels, scale_i=3, scale_j=0):
    """Calculates co-occurrence matrices for all the objects in the image.

    Returns an array P of shape (nobjects, nlevels, nlevels) such that
    P[o, :, :] is the co-occurrence matrix for object o, together with nlevels.

    quantized_image  -- a numpy array of integer type
    labels           -- a numpy array of integer type
    scale_i, scale_j -- integer row and column offsets between paired pixels

    For each object O, the co-occurrence matrix is defined as follows.
    Given a row number I in the matrix, let A be the set of pixels in
    O with gray level I, excluding pixels in the rightmost S
    columns of the image.  Let B be the set of pixels in O that are S
    pixels to the right of a pixel in A.  Row I of the co-occurrence
    matrix is the gray-level histogram of the pixels in B.
    """
    labels = labels.astype(int)
    nlevels = quantized_image.max() + 1
    nobjects = labels.max()
    if scale_i < 0:
        scale_i = -scale_i
        scale_j = -scale_j
    if scale_i == 0 and scale_j > 0:
        image_a = quantized_image[:, :-scale_j]
        image_b = quantized_image[:, scale_j:]
        labels_ab = labels_a = labels[:, :-scale_j]
        labels_b = labels[:, scale_j:]
    elif scale_i > 0 and scale_j == 0:
        image_a = quantized_image[:-scale_i, :]
        image_b = quantized_image[scale_i:, :]
        labels_ab = labels_a = labels[:-scale_i, :]
        labels_b = labels[scale_i:, :]
    elif scale_i > 0 and scale_j > 0:
        image_a = quantized_image[:-scale_i, :-scale_j]
        image_b = quantized_image[scale_i:, scale_j:]
        labels_ab = labels_a = labels[:-scale_i, :-scale_j]
        labels_b = labels[scale_i:, scale_j:]
    else:
        # scale_j should be negative
        image_a = quantized_image[:-scale_i, -scale_j:]
        image_b = quantized_image[scale_i:, :scale_j]
        labels_ab = labels_a = labels[:-scale_i, -scale_j:]
        labels_b = labels[scale_i:, :scale_j]
    equilabel = ((labels_a == labels_b) & (labels_a > 0))
    if np.any(equilabel):

        Q = (nlevels*nlevels*(labels_ab[equilabel]-1)+
             nlevels*image_a[equilabel]+image_b[equilabel])
        R = np.bincount(Q)
        if R.size != nobjects*nlevels*nlevels:
            S = np.zeros(nobjects*nlevels*nlevels-R.size)
            R = np.hstack((R, S))
        P = R.reshape(nobjects, nlevels, nlevels)
        pixel_count = fix(scind.sum(equilabel, labels_ab,
                                    np.arange(nobjects, dtype=np.int32)+1))
        pixel_count = np.tile(pixel_count[:,np.newaxis,np.newaxis],
                              (1,nlevels,nlevels))
        return (P.astype(float) / pixel_count.astype(float), nlevels)
    else:
        return np.zeros((nobjects, nlevels, nlevels)), nlevels
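
As a small hedged check of the definition in the docstring, here is the horizontal case (scale_i=0, scale_j=1) for a single object, built directly with NumPy instead of the bincount trick above; the names and the example patch are made up:

import numpy as np

# Hypothetical 4x4 patch quantized to 3 gray levels, one object covering it all.
quantized = np.array([[0, 1, 2, 1],
                      [1, 1, 2, 0],
                      [2, 2, 1, 1],
                      [0, 1, 1, 2]])
labels = np.ones_like(quantized)
scale = 1                          # column offset between paired pixels
nlevels = quantized.max() + 1

a = quantized[:, :-scale].ravel()  # left pixel of each pair
b = quantized[:, scale:].ravel()   # its right-hand neighbour
same = ((labels[:, :-scale] == labels[:, scale:]) &
        (labels[:, :-scale] > 0)).ravel()

P = np.zeros((nlevels, nlevels))
np.add.at(P, (a[same], b[same]), 1)
P /= same.sum()                    # normalise by the number of valid pairs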
Example #16
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)
        dimensions = len(objects.shape)
        assert isinstance(objects, Objects)
        has_pixels = objects.areas > 0
        labels = objects.small_removed_segmented
        kept_labels = objects.segmented
        neighbor_objects = workspace.object_set.get_objects(
            self.neighbors_name.value)
        neighbor_labels = neighbor_objects.small_removed_segmented
        neighbor_kept_labels = neighbor_objects.segmented
        assert isinstance(neighbor_objects, Objects)
        if not self.wants_excluded_objects.value:
            # Remove labels not present in kept segmentation while preserving object IDs.
            mask = neighbor_kept_labels > 0
            neighbor_labels[~mask] = 0
        nobjects = numpy.max(labels)
        nkept_objects = len(objects.indices)
        nneighbors = numpy.max(neighbor_labels)

        _, object_numbers = objects.relate_labels(labels, kept_labels)
        if self.neighbors_are_objects:
            neighbor_numbers = object_numbers
            neighbor_has_pixels = has_pixels
        else:
            _, neighbor_numbers = neighbor_objects.relate_labels(
                neighbor_labels, neighbor_objects.small_removed_segmented)
            neighbor_has_pixels = numpy.bincount(
                neighbor_labels.ravel())[1:] > 0
        neighbor_count = numpy.zeros((nobjects, ))
        pixel_count = numpy.zeros((nobjects, ))
        first_object_number = numpy.zeros((nobjects, ), int)
        second_object_number = numpy.zeros((nobjects, ), int)
        first_x_vector = numpy.zeros((nobjects, ))
        second_x_vector = numpy.zeros((nobjects, ))
        first_y_vector = numpy.zeros((nobjects, ))
        second_y_vector = numpy.zeros((nobjects, ))
        angle = numpy.zeros((nobjects, ))
        percent_touching = numpy.zeros((nobjects, ))
        expanded_labels = None
        if self.distance_method == D_EXPAND:
            # Find the i,j coordinates of the nearest foreground point
            # to every background point
            if dimensions == 2:
                i, j = scipy.ndimage.distance_transform_edt(
                    labels == 0, return_distances=False, return_indices=True)
                # Assign each background pixel the label of its nearest
                # foreground pixel; foreground pixels keep their own label.
                labels = labels[i, j]
            else:
                k, i, j = scipy.ndimage.distance_transform_edt(
                    labels == 0, return_distances=False, return_indices=True)
                labels = labels[k, i, j]
            expanded_labels = labels  # for display
            distance = 1  # dilate once to make touching edges overlap
            scale = S_EXPANDED
            if self.neighbors_are_objects:
                neighbor_labels = labels.copy()
        elif self.distance_method == D_WITHIN:
            distance = self.distance.value
            scale = str(distance)
        elif self.distance_method == D_ADJACENT:
            distance = 1
            scale = S_ADJACENT
        else:
            raise ValueError("Unknown distance method: %s" %
                             self.distance_method.value)
        if nneighbors > (1 if self.neighbors_are_objects else 0):
            first_objects = []
            second_objects = []
            object_indexes = numpy.arange(nobjects, dtype=numpy.int32) + 1
            #
            # First, compute the first and second nearest neighbors,
            # and the angles between self and the first and second
            # nearest neighbors
            #
            ocenters = centers_of_labels(
                objects.small_removed_segmented).transpose()
            ncenters = centers_of_labels(
                neighbor_objects.small_removed_segmented).transpose()
            areas = fix(
                scipy.ndimage.sum(numpy.ones(labels.shape), labels,
                                  object_indexes))
            perimeter_outlines = outline(labels)
            perimeters = fix(
                scipy.ndimage.sum(numpy.ones(labels.shape), perimeter_outlines,
                                  object_indexes))

            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            order = numpy.zeros((nobjects, min(nneighbors, 3)),
                                dtype=numpy.uint32)
            j = numpy.arange(nneighbors)
            # (0, 1, 2) unless there are fewer than 3 neighbors
            partition_keys = tuple(range(min(nneighbors, 3)))
            for i in range(nobjects):
                dr = numpy.sqrt((ocenters[i, 0] - ncenters[j, 0])**2 +
                                (ocenters[i, 1] - ncenters[j, 1])**2)
                order[i, :] = numpy.argpartition(dr, partition_keys)[:3]

            first_neighbor = 1 if self.neighbors_are_objects else 0
            first_object_index = order[:, first_neighbor]
            first_x_vector = ncenters[first_object_index, 1] - ocenters[:, 1]
            first_y_vector = ncenters[first_object_index, 0] - ocenters[:, 0]
            if nneighbors > first_neighbor + 1:
                second_object_index = order[:, first_neighbor + 1]
                second_x_vector = ncenters[second_object_index,
                                           1] - ocenters[:, 1]
                second_y_vector = ncenters[second_object_index,
                                           0] - ocenters[:, 0]
                v1 = numpy.array((first_x_vector, first_y_vector))
                v2 = numpy.array((second_x_vector, second_y_vector))
                #
                # Project the unit vector v1 against the unit vector v2
                #
                dot = numpy.sum(v1 * v2, 0) / numpy.sqrt(
                    numpy.sum(v1**2, 0) * numpy.sum(v2**2, 0))
                angle = numpy.arccos(dot) * 180.0 / numpy.pi

            # Make the structuring element for dilation
            if dimensions == 2:
                strel = strel_disk(distance)
            else:
                strel = skimage.morphology.ball(distance)
            #
            # A little bigger one to enter into the border with a structure
            # that mimics the one used to create the outline
            #
            if dimensions == 2:
                strel_touching = strel_disk(distance + 0.5)
            else:
                strel_touching = skimage.morphology.ball(distance + 0.5)
            #
            # Get the extents for each object and calculate the patch
            # that excises the part of the image that is "distance"
            # away
            if dimensions == 2:
                i, j = numpy.mgrid[0:labels.shape[0], 0:labels.shape[1]]

                minimums_i, maximums_i, _, _ = scipy.ndimage.extrema(
                    i, labels, object_indexes)
                minimums_j, maximums_j, _, _ = scipy.ndimage.extrema(
                    j, labels, object_indexes)

                minimums_i = numpy.maximum(fix(minimums_i) - distance,
                                           0).astype(int)
                maximums_i = numpy.minimum(
                    fix(maximums_i) + distance + 1,
                    labels.shape[0]).astype(int)
                minimums_j = numpy.maximum(fix(minimums_j) - distance,
                                           0).astype(int)
                maximums_j = numpy.minimum(
                    fix(maximums_j) + distance + 1,
                    labels.shape[1]).astype(int)
            else:
                k, i, j = numpy.mgrid[0:labels.shape[0], 0:labels.shape[1],
                                      0:labels.shape[2]]

                minimums_k, maximums_k, _, _ = scipy.ndimage.extrema(
                    k, labels, object_indexes)
                minimums_i, maximums_i, _, _ = scipy.ndimage.extrema(
                    i, labels, object_indexes)
                minimums_j, maximums_j, _, _ = scipy.ndimage.extrema(
                    j, labels, object_indexes)

                minimums_k = numpy.maximum(fix(minimums_k) - distance,
                                           0).astype(int)
                maximums_k = numpy.minimum(
                    fix(maximums_k) + distance + 1,
                    labels.shape[0]).astype(int)
                minimums_i = numpy.maximum(fix(minimums_i) - distance,
                                           0).astype(int)
                maximums_i = numpy.minimum(
                    fix(maximums_i) + distance + 1,
                    labels.shape[1]).astype(int)
                minimums_j = numpy.maximum(fix(minimums_j) - distance,
                                           0).astype(int)
                maximums_j = numpy.minimum(
                    fix(maximums_j) + distance + 1,
                    labels.shape[2]).astype(int)
            #
            # Loop over all objects
            # Calculate which ones overlap "index"
            # Calculate how much overlap there is of others to "index"
            #
            for object_number in object_numbers:
                if object_number == 0:
                    #
                    # No corresponding object in small-removed. This means
                    # that the object has no pixels and so was never renumbered.
                    #
                    continue
                index = object_number - 1
                if dimensions == 2:
                    patch = labels[minimums_i[index]:maximums_i[index],
                                   minimums_j[index]:maximums_j[index], ]
                    npatch = neighbor_labels[
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ]
                else:
                    patch = labels[minimums_k[index]:maximums_k[index],
                                   minimums_i[index]:maximums_i[index],
                                   minimums_j[index]:maximums_j[index], ]
                    npatch = neighbor_labels[
                        minimums_k[index]:maximums_k[index],
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ]

                #
                # Find the neighbors
                #
                patch_mask = patch == (index + 1)
                if distance <= 5:
                    extended = scipy.ndimage.binary_dilation(patch_mask, strel)
                else:
                    extended = (scipy.signal.fftconvolve(
                        patch_mask, strel, mode="same") > 0.5)
                neighbors = numpy.unique(npatch[extended])
                neighbors = neighbors[neighbors != 0]
                if self.neighbors_are_objects:
                    neighbors = neighbors[neighbors != object_number]
                nc = len(neighbors)
                neighbor_count[index] = nc
                if nc > 0:
                    first_objects.append(numpy.ones(nc, int) * object_number)
                    second_objects.append(neighbors)
                #
                # Find the # of overlapping pixels. Dilate the neighbors
                # and see how many pixels overlap our image. Use a 3x3
                # structuring element to expand the overlapping edge
                # into the perimeter.
                #
                if dimensions == 2:
                    outline_patch = (perimeter_outlines[
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ] == object_number
                                     )
                else:
                    outline_patch = (perimeter_outlines[
                        minimums_k[index]:maximums_k[index],
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ] == object_number
                                     )
                if self.neighbors_are_objects:
                    extendme = (patch != 0) & (patch != object_number)
                    if distance <= 5:
                        extended = scipy.ndimage.binary_dilation(
                            extendme, strel_touching)
                    else:
                        extended = (scipy.signal.fftconvolve(
                            extendme, strel_touching, mode="same") > 0.5)
                else:
                    if distance <= 5:
                        extended = scipy.ndimage.binary_dilation(
                            (npatch != 0), strel_touching)
                    else:
                        extended = (scipy.signal.fftconvolve(
                            (npatch != 0), strel_touching, mode="same") > 0.5)
                overlap = numpy.sum(outline_patch & extended)
                pixel_count[index] = overlap
            if sum([len(x) for x in first_objects]) > 0:
                first_objects = numpy.hstack(first_objects)
                reverse_object_numbers = numpy.zeros(
                    max(numpy.max(object_numbers), numpy.max(first_objects)) +
                    1, int)
                reverse_object_numbers[object_numbers] = (
                    numpy.arange(len(object_numbers)) + 1)
                first_objects = reverse_object_numbers[first_objects]

                second_objects = numpy.hstack(second_objects)
                reverse_neighbor_numbers = numpy.zeros(
                    max(numpy.max(neighbor_numbers), numpy.max(second_objects))
                    + 1, int)
                reverse_neighbor_numbers[neighbor_numbers] = (
                    numpy.arange(len(neighbor_numbers)) + 1)
                second_objects = reverse_neighbor_numbers[second_objects]
                to_keep = (first_objects > 0) & (second_objects > 0)
                first_objects = first_objects[to_keep]
                second_objects = second_objects[to_keep]
            else:
                first_objects = numpy.zeros(0, int)
                second_objects = numpy.zeros(0, int)
            percent_touching = pixel_count * 100 / perimeters
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            #
            # Have to recompute nearest
            #
            first_object_number = numpy.zeros(nkept_objects, int)
            second_object_number = numpy.zeros(nkept_objects, int)
            if nkept_objects > (1 if self.neighbors_are_objects else 0):
                di = (ocenters[object_indexes[:, numpy.newaxis], 0] -
                      ncenters[neighbor_indexes[numpy.newaxis, :], 0])
                dj = (ocenters[object_indexes[:, numpy.newaxis], 1] -
                      ncenters[neighbor_indexes[numpy.newaxis, :], 1])
                distance_matrix = numpy.sqrt(di * di + dj * dj)
                distance_matrix[~has_pixels, :] = numpy.inf
                distance_matrix[:, ~neighbor_has_pixels] = numpy.inf
                #
                # order[:,0] should be arange(nobjects)
                # order[:,1] should be the nearest neighbor
                # order[:,2] should be the next nearest neighbor
                #
                order = numpy.lexsort([distance_matrix
                                       ]).astype(first_object_number.dtype)
                if self.neighbors_are_objects:
                    first_object_number[has_pixels] = order[has_pixels, 1] + 1
                    if nkept_objects > 2:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 2] + 1
                else:
                    first_object_number[has_pixels] = order[has_pixels, 0] + 1
                    if order.shape[1] > 1:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 1] + 1
        else:
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            first_objects = numpy.zeros(0, int)
            second_objects = numpy.zeros(0, int)
        #
        # Now convert all measurements from the small-removed to
        # the final number set.
        #
        neighbor_count = neighbor_count[object_indexes]
        neighbor_count[~has_pixels] = 0
        percent_touching = percent_touching[object_indexes]
        percent_touching[~has_pixels] = 0
        first_x_vector = first_x_vector[object_indexes]
        second_x_vector = second_x_vector[object_indexes]
        first_y_vector = first_y_vector[object_indexes]
        second_y_vector = second_y_vector[object_indexes]
        angle = angle[object_indexes]
        #
        # Record the measurements
        #
        assert isinstance(workspace, Workspace)
        m = workspace.measurements
        assert isinstance(m, Measurements)
        image_set = workspace.image_set
        features_and_data = [
            (M_NUMBER_OF_NEIGHBORS, neighbor_count),
            (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
            (
                M_FIRST_CLOSEST_DISTANCE,
                numpy.sqrt(first_x_vector**2 + first_y_vector**2),
            ),
            (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
            (
                M_SECOND_CLOSEST_DISTANCE,
                numpy.sqrt(second_x_vector**2 + second_y_vector**2),
            ),
            (M_ANGLE_BETWEEN_NEIGHBORS, angle),
            (M_PERCENT_TOUCHING, percent_touching),
        ]
        for feature_name, data in features_and_data:
            m.add_measurement(self.object_name.value,
                              self.get_measurement_name(feature_name), data)
        if len(first_objects) > 0:
            m.add_relate_measurement(
                self.module_num,
                NEIGHBORS,
                self.object_name.value,
                self.object_name.value
                if self.neighbors_are_objects else self.neighbors_name.value,
                m.image_set_number * numpy.ones(first_objects.shape, int),
                first_objects,
                m.image_set_number * numpy.ones(second_objects.shape, int),
                second_objects,
            )

        labels = kept_labels

        neighbor_count_image = numpy.zeros(labels.shape, int)
        object_mask = objects.segmented != 0
        object_indexes = objects.segmented[object_mask] - 1
        neighbor_count_image[object_mask] = neighbor_count[object_indexes]
        workspace.display_data.neighbor_count_image = neighbor_count_image

        percent_touching_image = numpy.zeros(labels.shape)
        percent_touching_image[object_mask] = percent_touching[object_indexes]
        workspace.display_data.percent_touching_image = percent_touching_image

        image_set = workspace.image_set
        if self.wants_count_image.value:
            neighbor_cm_name = self.count_colormap.value
            neighbor_cm = get_colormap(neighbor_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=neighbor_cm)
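            # to_rgba maps each neighbor count through the chosen colormap;
            # the alpha channel is dropped and background pixels are blacked out below.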
            img = sm.to_rgba(neighbor_count_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            count_image = Image(img, masking_objects=objects)
            image_set.add(self.count_image_name.value, count_image)
        else:
            neighbor_cm_name = "Blues"
            neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
        if self.wants_percent_touching_image.value:
            percent_touching_cm_name = self.touching_colormap.value
            percent_touching_cm = get_colormap(percent_touching_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=percent_touching_cm)
            img = sm.to_rgba(percent_touching_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            touching_image = Image(img, masking_objects=objects)
            image_set.add(self.touching_image_name.value, touching_image)
        else:
            percent_touching_cm_name = "Oranges"
            percent_touching_cm = matplotlib.cm.get_cmap(
                percent_touching_cm_name)

        if self.show_window:
            workspace.display_data.neighbor_cm_name = neighbor_cm_name
            workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.neighbor_labels = neighbor_labels
            workspace.display_data.expanded_labels = expanded_labels
            workspace.display_data.object_mask = object_mask
            workspace.display_data.dimensions = dimensions
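
The lexsort bookkeeping above amounts to a per-row sort of an object-to-neighbor distance matrix: when the neighbors are the objects themselves, column 0 of the sorted order is the object itself (distance zero), so columns 1 and 2 give the first and second closest neighbors. A minimal standalone sketch of the same idea using plain numpy.argsort and made-up centroids (all names below are illustrative, not CellProfiler API):

import numpy

# Hypothetical centroids (row, column) for five objects, for illustration only.
centers = numpy.array([[0.0, 0.0], [0.0, 3.0], [4.0, 0.0], [4.0, 3.0], [2.0, 1.5]])

# Pairwise Euclidean distances, objects x objects.
di = centers[:, numpy.newaxis, 0] - centers[numpy.newaxis, :, 0]
dj = centers[:, numpy.newaxis, 1] - centers[numpy.newaxis, :, 1]
distance_matrix = numpy.sqrt(di * di + dj * dj)

# Sorting each row puts the object itself (distance 0) in column 0, so
# columns 1 and 2 hold the first and second closest neighbors.
order = numpy.argsort(distance_matrix, axis=1)
first_closest = order[:, 1] + 1    # +1 converts a zero-based index to an object number
second_closest = order[:, 2] + 1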
Exemplo n.º 17
    def do_measurements(self, workspace, image_name, object_name,
                        center_object_name, center_choice,
                        bin_count_settings, dd):
        '''Perform the radial measurements on the image set

        workspace - workspace that holds images / objects
        image_name - make measurements on this image
        object_name - make measurements on these objects
        center_object_name - use the centers of these related objects as
                      the centers for radial measurements. None to use the
                      objects themselves.
        center_choice - the user's center choice for this object:
                      C_SELF, C_CENTERS_OF_OBJECTS or C_EDGES_OF_OBJECTS.
        bin_count_settings - the bin count settings group
        dd - a dictionary for saving reusable partial results

        returns one statistics tuple per ring.
        '''
        assert isinstance(workspace, cpw.Workspace)
        assert isinstance(workspace.object_set, cpo.ObjectSet)
        bin_count = bin_count_settings.bin_count.value
        wants_scaled = bin_count_settings.wants_scaled.value
        maximum_radius = bin_count_settings.maximum_radius.value

        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        labels, pixel_data = cpo.crop_labels_and_image(objects.segmented,
                                                       image.pixel_data)
        nobjects = np.max(objects.segmented)
        measurements = workspace.measurements
        assert isinstance(measurements, cpmeas.Measurements)
        heatmaps = {}
        for heatmap in self.heatmaps:
            if heatmap.object_name.get_objects_name() == object_name and \
                            image_name == heatmap.image_name.get_image_name() and \
                            heatmap.get_number_of_bins() == bin_count:
                dd[id(heatmap)] = \
                    heatmaps[MEASUREMENT_ALIASES[heatmap.measurement.value]] = \
                    np.zeros(labels.shape)
        if nobjects == 0:
            for bin in range(1, bin_count + 1):
                for feature in (F_FRAC_AT_D, F_MEAN_FRAC, F_RADIAL_CV):
                    feature_name = (
                        (feature + FF_GENERIC) % (image_name, bin, bin_count))
                    measurements.add_measurement(
                            object_name, "_".join([M_CATEGORY, feature_name]),
                            np.zeros(0))
                    if not wants_scaled:
                        measurement_name = "_".join([M_CATEGORY, feature,
                                                     image_name, FF_OVERFLOW])
                        measurements.add_measurement(
                                object_name, measurement_name, np.zeros(0))
            return [(image_name, object_name, "no objects", "-", "-", "-", "-")]
        name = (object_name if center_object_name is None
                else "%s_%s" % (object_name, center_object_name))
        if name in dd:
            normalized_distance, i_center, j_center, good_mask = dd[name]
        else:
            d_to_edge = distance_to_edge(labels)
            if center_object_name is not None:
                #
                # Use the center of the centering objects to assign a center
                # to each labeled pixel using propagation
                #
                center_objects = workspace.object_set.get_objects(center_object_name)
                center_labels, cmask = cpo.size_similarly(
                        labels, center_objects.segmented)
                pixel_counts = fix(scind.sum(
                        np.ones(center_labels.shape),
                        center_labels,
                        np.arange(1, np.max(center_labels) + 1, dtype=np.int32)))
                good = pixel_counts > 0
                i, j = (centers_of_labels(center_labels) + .5).astype(int)
                ig = i[good]
                jg = j[good]
                lg = np.arange(1, len(i) + 1)[good]
                if center_choice == C_CENTERS_OF_OTHER:
                    #
                    # Reduce the propagation labels to the centers of
                    # the centering objects
                    #
                    center_labels = np.zeros(center_labels.shape, int)
                    center_labels[ig, jg] = lg
                cl, d_from_center = propagate(np.zeros(center_labels.shape),
                                              center_labels,
                                              labels != 0, 1)
                #
                # Erase the centers that fall outside of labels
                #
                cl[labels == 0] = 0
                #
                # If objects are hollow or crescent-shaped, there may be
                # objects without center labels. As a backup, find the
                # center that is the closest to the center of mass.
                #
                missing_mask = (labels != 0) & (cl == 0)
                missing_labels = np.unique(labels[missing_mask])
                if len(missing_labels):
                    all_centers = centers_of_labels(labels)
                    missing_i_centers, missing_j_centers = \
                        all_centers[:, missing_labels - 1]
                    di = missing_i_centers[:, np.newaxis] - ig[np.newaxis, :]
                    dj = missing_j_centers[:, np.newaxis] - jg[np.newaxis, :]
                    missing_best = lg[np.lexsort((di * di + dj * dj,))[:, 0]]
                    best = np.zeros(np.max(labels) + 1, int)
                    best[missing_labels] = missing_best
                    cl[missing_mask] = best[labels[missing_mask]]
                    #
                    # Now compute the crow-flies distance to the centers
                    # of these pixels from whatever center was assigned to
                    # the object.
                    #
                    iii, jjj = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
                    di = iii[missing_mask] - i[cl[missing_mask] - 1]
                    dj = jjj[missing_mask] - j[cl[missing_mask] - 1]
                    d_from_center[missing_mask] = np.sqrt(di * di + dj * dj)
            else:
                # Find the point in each object farthest away from the edge.
                # This does better than the centroid:
                # * The center is within the object
                # * The center tends to be an interesting point, like the
                #   center of the nucleus or the center of one or the other
                #   of two touching cells.
                #
                i, j = maximum_position_of_labels(d_to_edge, labels, objects.indices)
                center_labels = np.zeros(labels.shape, int)
                center_labels[i, j] = labels[i, j]
                #
                # Use the coloring trick here to process touching objects
                # in separate operations
                #
                colors = color_labels(labels)
                ncolors = np.max(colors)
                d_from_center = np.zeros(labels.shape)
                cl = np.zeros(labels.shape, int)
                for color in range(1, ncolors + 1):
                    mask = colors == color
                    l, d = propagate(np.zeros(center_labels.shape),
                                     center_labels,
                                     mask, 1)
                    d_from_center[mask] = d[mask]
                    cl[mask] = l[mask]
            good_mask = cl > 0
            if center_choice == C_EDGES_OF_OTHER:
                # Exclude pixels within the centering objects
                # when performing calculations from the centers
                good_mask = good_mask & (center_labels == 0)
            i_center = np.zeros(cl.shape)
            i_center[good_mask] = i[cl[good_mask] - 1]
            j_center = np.zeros(cl.shape)
            j_center[good_mask] = j[cl[good_mask] - 1]

            normalized_distance = np.zeros(labels.shape)
            if wants_scaled:
                total_distance = d_from_center + d_to_edge
                normalized_distance[good_mask] = (d_from_center[good_mask] /
                                                  (total_distance[good_mask] + .001))
            else:
                normalized_distance[good_mask] = \
                    d_from_center[good_mask] / maximum_radius
            dd[name] = [normalized_distance, i_center, j_center, good_mask]
        ngood_pixels = np.sum(good_mask)
        good_labels = labels[good_mask]
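        # Scale each pixel's normalized distance into one of `bin_count` rings;
        # with unscaled (absolute) distances a pixel can land beyond the last
        # ring, so anything above bin_count is clipped into the overflow bin.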
        bin_indexes = (normalized_distance * bin_count).astype(int)
        bin_indexes[bin_indexes > bin_count] = bin_count
        labels_and_bins = (good_labels - 1, bin_indexes[good_mask])
        histogram = coo_matrix((pixel_data[good_mask], labels_and_bins),
                               (nobjects, bin_count + 1)).toarray()
        sum_by_object = np.sum(histogram, 1)
        sum_by_object_per_bin = np.dstack([sum_by_object] * (bin_count + 1))[0]
        fraction_at_distance = histogram / sum_by_object_per_bin
        number_at_distance = coo_matrix((np.ones(ngood_pixels), labels_and_bins),
                                        (nobjects, bin_count + 1)).toarray()
        object_mask = number_at_distance > 0
        sum_by_object = np.sum(number_at_distance, 1)
        sum_by_object_per_bin = np.dstack([sum_by_object] * (bin_count + 1))[0]
        fraction_at_bin = number_at_distance / sum_by_object_per_bin
        mean_pixel_fraction = fraction_at_distance / (fraction_at_bin +
                                                      np.finfo(float).eps)
        masked_fraction_at_distance = masked_array(fraction_at_distance,
                                                   ~object_mask)
        masked_mean_pixel_fraction = masked_array(mean_pixel_fraction,
                                                  ~object_mask)
        # Anisotropy calculation.  Split each cell into eight wedges, then
        # compute coefficient of variation of the wedges' mean intensities
        # in each ring.
        #
        # Compute each pixel's delta from the center object's centroid
        i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
        imask = i[good_mask] > i_center[good_mask]
        jmask = j[good_mask] > j_center[good_mask]
        absmask = (abs(i[good_mask] - i_center[good_mask]) >
                   abs(j[good_mask] - j_center[good_mask]))
        radial_index = (imask.astype(int) + jmask.astype(int) * 2 +
                        absmask.astype(int) * 4)
        statistics = []

        for bin in range(bin_count + (0 if wants_scaled else 1)):
            bin_mask = (good_mask & (bin_indexes == bin))
            bin_pixels = np.sum(bin_mask)
            bin_labels = labels[bin_mask]
            bin_radial_index = radial_index[bin_indexes[good_mask] == bin]
            labels_and_radii = (bin_labels - 1, bin_radial_index)
            radial_values = coo_matrix((pixel_data[bin_mask],
                                        labels_and_radii),
                                       (nobjects, 8)).toarray()
            pixel_count = coo_matrix((np.ones(bin_pixels), labels_and_radii),
                                     (nobjects, 8)).toarray()
            mask = pixel_count == 0
            radial_means = masked_array(radial_values / pixel_count, mask)
            radial_cv = np.std(radial_means, 1) / np.mean(radial_means, 1)
            radial_cv[np.sum(~mask, 1) == 0] = 0
            for measurement, feature, overflow_feature in (
                    (fraction_at_distance[:, bin], MF_FRAC_AT_D, OF_FRAC_AT_D),
                    (mean_pixel_fraction[:, bin], MF_MEAN_FRAC, OF_MEAN_FRAC),
                    (np.array(radial_cv), MF_RADIAL_CV, OF_RADIAL_CV)):

                if bin == bin_count:
                    measurement_name = overflow_feature % image_name
                else:
                    measurement_name = feature % (image_name, bin + 1, bin_count)
                measurements.add_measurement(object_name,
                                             measurement_name,
                                             measurement)
                if feature in heatmaps:
                    heatmaps[feature][bin_mask] = measurement[bin_labels - 1]
            radial_cv.mask = np.sum(~mask, 1) == 0
            bin_name = str(bin + 1) if bin < bin_count else "Overflow"
            statistics += [(image_name, object_name, bin_name, str(bin_count),
                            round(np.mean(masked_fraction_at_distance[:, bin]), 4),
                            round(np.mean(masked_mean_pixel_fraction[:, bin]), 4),
                            round(np.mean(radial_cv), 4))]
        return statistics
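
The ring measurements above rely on a sparse-matrix accumulation trick: scipy's coo_matrix sums duplicate (row, column) entries, so feeding it pixel intensities keyed by (object index, bin index) yields an objects-by-bins sum table in one vectorized call. A minimal sketch of that accumulation on toy data (variable names are illustrative only):

import numpy as np
from scipy.sparse import coo_matrix

# Toy data: six pixels belonging to two objects, already assigned to rings 0..2.
pixel_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
object_index = np.array([0, 0, 0, 1, 1, 1])   # zero-based object index per pixel
bin_index = np.array([0, 0, 1, 1, 2, 2])      # ring index per pixel
nobjects, nbins = 2, 3

# coo_matrix adds up entries that share the same (row, col), producing the
# per-object, per-ring intensity sums in a single step.
histogram = coo_matrix((pixel_values, (object_index, bin_index)),
                       shape=(nobjects, nbins)).toarray()
# histogram == [[3., 3., 0.],
#               [0., 4., 11.]]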
Exemplo n.º 18
    def run(self, workspace):
        '''Run the module on the image set'''
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(skeleton_name,
                                                       must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels, footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:

            def size_fn(area, is_object):
                return (not is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(combined_skel,
                                                     ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1, :-1] & combined_skel[1:, :-1]
                    & combined_skel[:-1, 1:] & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # emanating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
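        # branchings() counts the branches leaving each skeleton point; a point
        # on a simple line has two, so the lookup below converts the raw count
        # into the number of extra branches (3 neighbors -> 1 extra, 4 -> 2).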
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(
                scind.sum(branching_counts, nearby_labels,
                          label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0, ), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(
                scind.sum(branch_points, outside_labels, label_range))
        else:
            branch_counts = np.zeros((0, ), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels,
                                       label_range))
        else:
            end_counts = np.zeros((0, ), int)
        #
        # Calculate the distances
        #
        total_distance = morph.skeleton_length(dlabels * outside_skel,
                                               label_range)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_OBJSKELETON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join(
            (C_OBJSKELETON, F_NUMBER_NON_TRUNK_BRANCHES, skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join(
            (C_OBJSKELETON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        feature = "_".join(
            (C_OBJSKELETON, F_TOTAL_OBJSKELETON_LENGTH, skeleton_name))
        m[seed_objects_name, feature] = total_distance
        #
        # Collect the graph information
        #
        if self.wants_objskeleton_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_objskeleton_graph(
                combined_skel, dlabels, trunk_mask,
                branch_points & ~trunk_mask, end_points,
                intensity_image.pixel_data)

            image_number = workspace.measurements.image_set_number

            edge_path, vertex_path = self.get_graph_file_paths(
                m, image_number)
            workspace.interaction_request(self,
                                          image_number,
                                          edge_path,
                                          edge_graph,
                                          vertex_path,
                                          vertex_graph,
                                          headless_ok=True)

            if self.show_window:
                workspace.display_data.edge_graph = edge_graph
                workspace.display_data.vertex_graph = vertex_graph
                workspace.display_data.intensity_image = intensity_image.pixel_data
        #
        # Make the display image
        #
        if self.show_window or self.wants_branchpoint_image:
            branchpoint_image = np.zeros(
                (skeleton.shape[0], skeleton.shape[1], 3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= .875
            branchpoint_image[dilated_labels != 0, :] += .1
            if self.show_window:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image, parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
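
The ring construction in the run method above -- dilate the seed labels, erode them back, and keep only the shell so the skeleton trunks poke through it -- can be reproduced with plain scipy morphology. A small standalone sketch of the same donut-mask idea on a toy label image (the 3x3 footprint here merely stands in for CellProfiler's strel_disk; it is an assumption, not the module's actual element):

import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

# Toy label image with a single 3x3 seed object labeled 1.
labels = np.zeros((9, 9), int)
labels[3:6, 3:6] = 1

footprint = np.ones((3, 3), int)          # stand-in for a small disk structuring element
dilated = grey_dilation(labels, footprint=footprint)
center = grey_erosion(dilated, footprint=footprint)

# The ring is everything the dilation added minus the recovered center,
# i.e. a one-pixel shell around each seed object.
ring_mask = (dilated > 0) & (center == 0)
print(ring_mask.astype(int))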
Exemplo n.º 19
    def run_image_pair_objects(self, workspace, first_image_name,
                               second_image_name, object_name):
        '''Calculate per-object correlations between intensities in two images'''
        first_image = workspace.image_set.get_image(first_image_name,
                                                    must_be_grayscale=True)
        second_image = workspace.image_set.get_image(second_image_name,
                                                     must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        #
        # Crop both images to the size of the labels matrix
        #
        labels = objects.segmented
        try:
            first_pixels = objects.crop_image_similarly(first_image.pixel_data)
            first_mask = objects.crop_image_similarly(first_image.mask)
        except ValueError:
            first_pixels, m1 = cpo.size_similarly(labels,
                                                  first_image.pixel_data)
            first_mask, m1 = cpo.size_similarly(labels, first_image.mask)
            first_mask[~m1] = False
        try:
            second_pixels = objects.crop_image_similarly(
                second_image.pixel_data)
            second_mask = objects.crop_image_similarly(second_image.mask)
        except ValueError:
            second_pixels, m1 = cpo.size_similarly(labels,
                                                   second_image.pixel_data)
            second_mask, m1 = cpo.size_similarly(labels, second_image.mask)
            second_mask[~m1] = False
        mask = ((labels > 0) & first_mask & second_mask)
        first_pixels = first_pixels[mask]
        second_pixels = second_pixels[mask]
        labels = labels[mask]
        n_objects = objects.count
        # Handle case when both images for the correlation are completely masked out

        if n_objects == 0:
            corr = np.zeros((0, ))
        elif not np.any(mask):
            corr = np.zeros((n_objects, ))
            corr[:] = np.NaN
        else:
            #
            # The correlation is sum((x - mean(x)) * (y - mean(y))) /
            #     (sqrt(sum((x - mean(x))**2)) * sqrt(sum((y - mean(y))**2)))
            #
            lrange = np.arange(n_objects, dtype=np.int32) + 1
            area = fix(scind.sum(np.ones_like(labels), labels, lrange))
            mean1 = fix(scind.mean(first_pixels, labels, lrange))
            mean2 = fix(scind.mean(second_pixels, labels, lrange))
            #
            # Calculate the root sum of squared deviations (std dev times sqrt(n)).
            #
            std1 = np.sqrt(
                fix(
                    scind.sum((first_pixels - mean1[labels - 1])**2, labels,
                              lrange)))
            std2 = np.sqrt(
                fix(
                    scind.sum((second_pixels - mean2[labels - 1])**2, labels,
                              lrange)))
            x = first_pixels - mean1[labels - 1]  # x - mean(x)
            y = second_pixels - mean2[labels - 1]  # y - mean(y)
            corr = fix(
                scind.sum(x * y / (std1[labels - 1] * std2[labels - 1]),
                          labels, lrange))
            # Explicitly set the correlation to NaN for masked objects
            corr[area == 0] = np.NaN
        measurement = ("Correlation_Correlation_%s_%s" %
                       (first_image_name, second_image_name))
        workspace.measurements.add_measurement(object_name, measurement, corr)
        if n_objects == 0:
            return [[
                first_image_name, second_image_name, object_name,
                "Mean correlation", "-"
            ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Median correlation", "-"
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Min correlation", "-"
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Max correlation", "-"
                    ]]
        else:
            return [[
                first_image_name, second_image_name, object_name,
                "Mean correlation",
                "%.2f" % np.mean(corr)
            ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Median correlation",
                        "%.2f" % np.median(corr)
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Min correlation",
                        "%.2f" % np.min(corr)
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Max correlation",
                        "%.2f" % np.max(corr)
                    ]]
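
The per-object correlation above is a label-wise Pearson coefficient: centre each object's pixels on its own mean, multiply the two centred images, and divide by the root sums of squares, with all per-object sums delegated to scipy.ndimage. A compact standalone sketch on toy data (the function and variable names here are illustrative, not this module's API):

import numpy as np
import scipy.ndimage as scind

def per_object_correlation(pixels1, pixels2, labels):
    """Pearson correlation of two intensity vectors within each labeled object."""
    lrange = np.arange(1, labels.max() + 1)
    mean1 = np.asarray(scind.mean(pixels1, labels, lrange))
    mean2 = np.asarray(scind.mean(pixels2, labels, lrange))
    x = pixels1 - mean1[labels - 1]          # center on each object's own mean
    y = pixels2 - mean2[labels - 1]
    ss1 = np.sqrt(scind.sum(x * x, labels, lrange))
    ss2 = np.sqrt(scind.sum(y * y, labels, lrange))
    return np.asarray(scind.sum(x * y, labels, lrange)) / (ss1 * ss2)

# Two toy "images" flattened to the pixels of two objects (labels 1 and 2).
labels = np.array([1, 1, 1, 2, 2, 2])
a = np.array([0.1, 0.2, 0.3, 0.9, 0.5, 0.1])
b = np.array([0.2, 0.4, 0.6, 0.1, 0.5, 0.9])
print(per_object_correlation(a, b, labels))   # ~[ 1.0, -1.0]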
    def run(self, workspace):
        if self.show_window:
            workspace.display_data.col_labels = ("Image", "channel", "Object",
                                                 "Feature", "Mean", "Median",
                                                 "STD")
            workspace.display_data.statistics = statistics = []
        for im in self.images:
            image_name = im.name
            image = workspace.image_set.get_image(image_name.value,
                                                  must_be_grayscale=False)
            nchan = im.nchannels.value
            for channel in range(nchan):
                for object_name in [obj.name for obj in self.objects]:
                    # Need to refresh image after each iteration...
                    if nchan == 1:
                        img = image.pixel_data.copy()
                    else:
                        img = image.pixel_data[:, :, channel].squeeze().copy()
                    if image.has_mask:
                        masked_image = img.copy()
                        masked_image[~image.mask] = 0
                    else:
                        masked_image = img
                    objects = workspace.object_set.get_objects(
                        object_name.value)
                    nobjects = objects.count
                    integrated_intensity = np.zeros((nobjects, ))
                    integrated_intensity_edge = np.zeros((nobjects, ))
                    mean_intensity = np.zeros((nobjects, ))
                    mean_intensity_edge = np.zeros((nobjects, ))
                    std_intensity = np.zeros((nobjects, ))
                    std_intensity_edge = np.zeros((nobjects, ))
                    min_intensity = np.zeros((nobjects, ))
                    min_intensity_edge = np.zeros((nobjects, ))
                    max_intensity = np.zeros((nobjects, ))
                    max_intensity_edge = np.zeros((nobjects, ))
                    mass_displacement = np.zeros((nobjects, ))
                    lower_quartile_intensity = np.zeros((nobjects, ))
                    median_intensity = np.zeros((nobjects, ))
                    mad_intensity = np.zeros((nobjects, ))
                    upper_quartile_intensity = np.zeros((nobjects, ))
                    cmi_x = np.zeros((nobjects, ))
                    cmi_y = np.zeros((nobjects, ))
                    max_x = np.zeros((nobjects, ))
                    max_y = np.zeros((nobjects, ))
                    for labels, lindexes in objects.get_labels():
                        lindexes = lindexes[lindexes != 0]
                        labels, img = cpo.crop_labels_and_image(labels, img)
                        _, masked_image = cpo.crop_labels_and_image(
                            labels, masked_image)
                        outlines = cpmo.outline(labels)

                        if image.has_mask:
                            _, mask = cpo.crop_labels_and_image(
                                labels, image.mask)
                            masked_labels = labels.copy()
                            masked_labels[~mask] = 0
                            masked_outlines = outlines.copy()
                            masked_outlines[~mask] = 0
                        else:
                            masked_labels = labels
                            masked_outlines = outlines

                        lmask = (masked_labels > 0) & np.isfinite(
                            img)  # Ignore NaNs, Infs
                        has_objects = np.any(lmask)
                        if has_objects:
                            limg = img[lmask]
                            llabels = labels[lmask]
                            mesh_y, mesh_x = np.mgrid[0:masked_image.shape[0],
                                                      0:masked_image.shape[1]]
                            mesh_x = mesh_x[lmask]
                            mesh_y = mesh_y[lmask]
                            lcount = fix(
                                nd.sum(np.ones(len(limg)), llabels, lindexes))
                            integrated_intensity[lindexes - 1] = \
                                fix(nd.sum(limg, llabels, lindexes))
                            mean_intensity[lindexes - 1] = \
                                integrated_intensity[lindexes - 1] / lcount
                            std_intensity[lindexes - 1] = np.sqrt(
                                fix(
                                    nd.mean((limg -
                                             mean_intensity[llabels - 1])**2,
                                            llabels, lindexes)))
                            min_intensity[lindexes - 1] = fix(
                                nd.minimum(limg, llabels, lindexes))
                            max_intensity[lindexes - 1] = fix(
                                nd.maximum(limg, llabels, lindexes))
                            # Compute the position of the intensity maximum
                            max_position = np.array(fix(
                                nd.maximum_position(limg, llabels, lindexes)),
                                                    dtype=int)
                            max_position = np.reshape(
                                max_position, (max_position.shape[0], ))
                            max_x[lindexes - 1] = mesh_x[max_position]
                            max_y[lindexes - 1] = mesh_y[max_position]
                            # The mass displacement is the distance between the center
                            # of mass of the binary image and of the intensity image. The
                            # center of mass is the average X or Y for the binary image
                            # and the sum of X or Y * intensity / integrated intensity
                            cm_x = fix(nd.mean(mesh_x, llabels, lindexes))
                            cm_y = fix(nd.mean(mesh_y, llabels, lindexes))

                            i_x = fix(nd.sum(mesh_x * limg, llabels, lindexes))
                            i_y = fix(nd.sum(mesh_y * limg, llabels, lindexes))
                            cmi_x[lindexes -
                                  1] = i_x / integrated_intensity[lindexes - 1]
                            cmi_y[lindexes -
                                  1] = i_y / integrated_intensity[lindexes - 1]
                            diff_x = cm_x - cmi_x[lindexes - 1]
                            diff_y = cm_y - cmi_y[lindexes - 1]
                            mass_displacement[lindexes - 1] = \
                                np.sqrt(diff_x * diff_x + diff_y * diff_y)
                            #
                            # Sort the intensities by label, then intensity.
                            # For each label, find the index above and below
                            # the 25%, 50% and 75% mark and take the weighted
                            # average.
                            #
                            order = np.lexsort((limg, llabels))
                            areas = lcount.astype(int)
                            indices = np.cumsum(areas) - areas
                            for dest, fraction in ((lower_quartile_intensity,
                                                    1.0 / 4.0),
                                                   (median_intensity,
                                                    1.0 / 2.0),
                                                   (upper_quartile_intensity,
                                                    3.0 / 4.0)):
                                qindex = indices.astype(
                                    float) + areas * fraction
                                qfraction = qindex - np.floor(qindex)
                                qindex = qindex.astype(int)
                                qmask = qindex < indices + areas - 1
                                qi = qindex[qmask]
                                qf = qfraction[qmask]
                                dest[lindexes[qmask] -
                                     1] = (limg[order[qi]] * (1 - qf) +
                                           limg[order[qi + 1]] * qf)
                                #
                                # In some situations (e.g. only 3 points), there may
                                # not be an upper bound.
                                #
                                qmask = (~qmask) & (areas > 0)
                                dest[lindexes[qmask] -
                                     1] = limg[order[qindex[qmask]]]
                            #
                            # Once again, for the MAD
                            #
                            madimg = np.abs(limg -
                                            median_intensity[llabels - 1])
                            order = np.lexsort((madimg, llabels))
                            qindex = indices.astype(float) + areas / 2.0
                            qfraction = qindex - np.floor(qindex)
                            qindex = qindex.astype(int)
                            qmask = qindex < indices + areas - 1
                            qi = qindex[qmask]
                            qf = qfraction[qmask]
                            mad_intensity[lindexes[qmask] -
                                          1] = (madimg[order[qi]] * (1 - qf) +
                                                madimg[order[qi + 1]] * qf)
                            qmask = (~qmask) & (areas > 0)
                            mad_intensity[lindexes[qmask] -
                                          1] = madimg[order[qindex[qmask]]]

                        emask = masked_outlines > 0
                        eimg = img[emask]
                        elabels = labels[emask]
                        has_edge = len(eimg) > 0
                        if has_edge:
                            ecount = fix(
                                nd.sum(np.ones(len(eimg)), elabels, lindexes))
                            integrated_intensity_edge[lindexes - 1] = \
                                fix(nd.sum(eimg, elabels, lindexes))
                            mean_intensity_edge[lindexes - 1] = \
                                integrated_intensity_edge[lindexes - 1] / ecount
                            std_intensity_edge[lindexes - 1] = \
                                np.sqrt(fix(nd.mean(
                                        (eimg - mean_intensity_edge[elabels - 1]) ** 2,
                                        elabels, lindexes)))
                            min_intensity_edge[lindexes - 1] = fix(
                                nd.minimum(eimg, elabels, lindexes))
                            max_intensity_edge[lindexes - 1] = fix(
                                nd.maximum(eimg, elabels, lindexes))
                    m = workspace.measurements
                    for category, feature_name, measurement in \
                            ((INTENSITY, INTEGRATED_INTENSITY, integrated_intensity),
                             (INTENSITY, MEAN_INTENSITY, mean_intensity),
                             (INTENSITY, STD_INTENSITY, std_intensity),
                             (INTENSITY, MIN_INTENSITY, min_intensity),
                             (INTENSITY, MAX_INTENSITY, max_intensity),
                             (INTENSITY, INTEGRATED_INTENSITY_EDGE, integrated_intensity_edge),
                             (INTENSITY, MEAN_INTENSITY_EDGE, mean_intensity_edge),
                             (INTENSITY, STD_INTENSITY_EDGE, std_intensity_edge),
                             (INTENSITY, MIN_INTENSITY_EDGE, min_intensity_edge),
                             (INTENSITY, MAX_INTENSITY_EDGE, max_intensity_edge),
                             (INTENSITY, MASS_DISPLACEMENT, mass_displacement),
                             (INTENSITY, LOWER_QUARTILE_INTENSITY, lower_quartile_intensity),
                             (INTENSITY, MEDIAN_INTENSITY, median_intensity),
                             (INTENSITY, MAD_INTENSITY, mad_intensity),
                             (INTENSITY, UPPER_QUARTILE_INTENSITY, upper_quartile_intensity),
                             (C_LOCATION, LOC_CMI_X, cmi_x),
                             (C_LOCATION, LOC_CMI_Y, cmi_y),
                             (C_LOCATION, LOC_MAX_X, max_x),
                             (C_LOCATION, LOC_MAX_Y, max_y)):

                        measurement_name = "%s_%s_%s_c%s" % (
                            category, feature_name, image_name.value,
                            str(channel + 1))
                        m.add_measurement(object_name.value, measurement_name,
                                          measurement)
                        if self.show_window and len(measurement) > 0:
                            statistics.append(
                                (image_name.value, 'c' + str(channel + 1),
                                 object_name.value, feature_name,
                                 np.round(np.mean(measurement), 3),
                                 np.round(np.median(measurement),
                                          3), np.round(np.std(measurement),
                                                       3)))
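
The quartile/median block above sorts pixels by (label, intensity) with lexsort, then, for each object, steps to the fractional position at 25/50/75% of that object's run of sorted pixels and linearly interpolates between the two bracketing values. A standalone sketch of that per-label interpolated quantile (names are illustrative; it reproduces the same index convention as the code above, which differs slightly from numpy.median):

import numpy as np

def per_label_quantile(values, labels, nlabels, fraction):
    """Interpolated per-label quantile using the lexsort/cumsum trick above."""
    order = np.lexsort((values, labels))             # sort by label, then intensity
    areas = np.bincount(labels, minlength=nlabels + 1)[1:]
    starts = np.cumsum(areas) - areas                # index of each label's first pixel
    result = np.zeros(nlabels)
    qindex = starts.astype(float) + areas * fraction
    qfrac = qindex - np.floor(qindex)
    qindex = qindex.astype(int)
    has_upper = qindex < starts + areas - 1          # room to interpolate upward
    result[has_upper] = (values[order[qindex[has_upper]]] * (1 - qfrac[has_upper]) +
                         values[order[qindex[has_upper] + 1]] * qfrac[has_upper])
    lone = (~has_upper) & (areas > 0)                # last pixel of the run: no upper bound
    result[lone] = values[order[qindex[lone]]]
    return result

labels = np.array([1, 1, 1, 1, 2, 2, 2])
values = np.array([3.0, 1.0, 2.0, 4.0, 10.0, 30.0, 20.0])
print(per_label_quantile(values, labels, 2, 0.5))    # [ 3. 25.] under this convention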
Exemplo n.º 21
    def run_on_objects(self, object_name, workspace):
        """Run, computing the area measurements for a single map of objects"""
        objects = workspace.get_objects(object_name)
        assert isinstance(objects, cpo.Objects)
        #
        # Do the ellipse-related measurements
        #
        i, j, l = objects.ijv.transpose()
        centers, eccentricity, major_axis_length, minor_axis_length, \
            theta, compactness =\
            ellipse_from_second_moments_ijv(i, j, 1, l, objects.indices, True)
        del i
        del j
        del l
        self.record_measurement(workspace, object_name,
                                F_ECCENTRICITY, eccentricity)
        self.record_measurement(workspace, object_name,
                                F_MAJOR_AXIS_LENGTH, major_axis_length)
        self.record_measurement(workspace, object_name, 
                                F_MINOR_AXIS_LENGTH, minor_axis_length)
        self.record_measurement(workspace, object_name, F_ORIENTATION, 
                                theta * 180 / np.pi)
        self.record_measurement(workspace, object_name, F_COMPACTNESS,
                                compactness)
        is_first = False
        if len(objects.indices) == 0:
            nobjects = 0
        else:
            nobjects = np.max(objects.indices)
        mcenter_x = np.zeros(nobjects)
        mcenter_y = np.zeros(nobjects)
        mextent = np.zeros(nobjects)
        mperimeters = np.zeros(nobjects)
        msolidity = np.zeros(nobjects)
        euler = np.zeros(nobjects)
        max_radius = np.zeros(nobjects)
        median_radius = np.zeros(nobjects)
        mean_radius = np.zeros(nobjects)
        min_feret_diameter = np.zeros(nobjects)
        max_feret_diameter = np.zeros(nobjects)
        zernike_numbers = self.get_zernike_numbers()
        zf = {}
        for n,m in zernike_numbers:
            zf[(n,m)] = np.zeros(nobjects)
        if nobjects > 0:
            chulls, chull_counts = convex_hull_ijv(objects.ijv, objects.indices)
            for labels, indices in objects.get_labels():
                to_indices = indices-1
                distances = distance_to_edge(labels)
                mcenter_y[to_indices], mcenter_x[to_indices] =\
                         maximum_position_of_labels(distances, labels, indices)
                max_radius[to_indices] = fix(scind.maximum(
                    distances, labels, indices))
                mean_radius[to_indices] = fix(scind.mean(
                    distances, labels, indices))
                median_radius[to_indices] = median_of_labels(
                    distances, labels, indices)
                #
                # The extent (area / bounding box area)
                #
                mextent[to_indices] = calculate_extents(labels, indices)
                #
                # The perimeter distance
                #
                mperimeters[to_indices] = calculate_perimeters(labels, indices)
                #
                # Solidity
                #
                msolidity[to_indices] = calculate_solidity(labels, indices)
                #
                # Euler number
                #
                euler[to_indices] = euler_number(labels, indices)
                #
                # Zernike features
                #
                zf_l = cpmz.zernike(zernike_numbers, labels, indices)
                for (n,m), z in zip(zernike_numbers, zf_l.transpose()):
                    zf[(n,m)][to_indices] = z
            #
            # Form factor
            #
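            # ff is 1.0 for a perfect circle and decreases toward 0 as the
            # perimeter grows relative to the enclosed area.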
            ff = 4.0 * np.pi * objects.areas / mperimeters**2
            #
            # Feret diameter
            #
            min_feret_diameter, max_feret_diameter = \
                feret_diameter(chulls, chull_counts, objects.indices)
            
        else:
            ff = np.zeros(0)

        for f, m in ([(F_AREA, objects.areas),
                      (F_CENTER_X, mcenter_x),
                      (F_CENTER_Y, mcenter_y),
                      (F_EXTENT, mextent),
                      (F_PERIMETER, mperimeters),
                      (F_SOLIDITY, msolidity),
                      (F_FORM_FACTOR, ff),
                      (F_EULER_NUMBER, euler),
                      (F_MAXIMUM_RADIUS, max_radius),
                      (F_MEAN_RADIUS, mean_radius),
                      (F_MEDIAN_RADIUS, median_radius),
                      (F_MIN_FERET_DIAMETER, min_feret_diameter),
                      (F_MAX_FERET_DIAMETER, max_feret_diameter)] +
                     [(self.get_zernike_name((n,m)), zf[(n,m)])
                       for n,m in zernike_numbers]):
            self.record_measurement(workspace, object_name, f, m) 
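
calculate_extents above reports each object's area divided by the area of its bounding box. The same quantity can be approximated directly with scipy.ndimage.find_objects; a toy sketch (not the helper the module actually uses):

import numpy as np
from scipy.ndimage import find_objects

labels = np.zeros((8, 8), int)
labels[1:4, 1:4] = 1          # a 3x3 square: extent 1.0
labels[5, 5] = 2
labels[6, 6] = 2              # a diagonal pair: 2 pixels in a 2x2 box -> extent 0.5

extents = []
for obj_slice in find_objects(labels):
    # area of the object divided by the area of its bounding box
    label_value = len(extents) + 1
    box = labels[obj_slice]
    extents.append(np.sum(box == label_value) / box.size)
print(extents)   # [1.0, 0.5]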
Exemplo n.º 22
    def run_on_objects(self, object_name, workspace):
        """Run, computing the area measurements for a single map of objects"""
        objects = workspace.get_objects(object_name)
        assert isinstance(objects, cpo.Objects)
        #
        # Do the ellipse-related measurements
        #
        i, j, l = objects.ijv.transpose()
        centers, eccentricity, major_axis_length, minor_axis_length, \
            theta, compactness =\
            ellipse_from_second_moments_ijv(i, j, 1, l, objects.indices, True)
        del i
        del j
        del l
        self.record_measurement(workspace, object_name, F_ECCENTRICITY,
                                eccentricity)
        self.record_measurement(workspace, object_name, F_MAJOR_AXIS_LENGTH,
                                major_axis_length)
        self.record_measurement(workspace, object_name, F_MINOR_AXIS_LENGTH,
                                minor_axis_length)
        self.record_measurement(workspace, object_name, F_ORIENTATION,
                                theta * 180 / np.pi)
        self.record_measurement(workspace, object_name, F_COMPACTNESS,
                                compactness)
        is_first = False
        if len(objects.indices) == 0:
            nobjects = 0
        else:
            nobjects = np.max(objects.indices)
        mcenter_x = np.zeros(nobjects)
        mcenter_y = np.zeros(nobjects)
        mextent = np.zeros(nobjects)
        mperimeters = np.zeros(nobjects)
        msolidity = np.zeros(nobjects)
        euler = np.zeros(nobjects)
        max_radius = np.zeros(nobjects)
        median_radius = np.zeros(nobjects)
        mean_radius = np.zeros(nobjects)
        min_feret_diameter = np.zeros(nobjects)
        max_feret_diameter = np.zeros(nobjects)
        zernike_numbers = self.get_zernike_numbers()
        zf = {}
        for n, m in zernike_numbers:
            zf[(n, m)] = np.zeros(nobjects)
        if nobjects > 0:
            chulls, chull_counts = convex_hull_ijv(objects.ijv,
                                                   objects.indices)
            for labels, indices in objects.get_labels():
                to_indices = indices - 1
                distances = distance_to_edge(labels)
                mcenter_y[to_indices], mcenter_x[to_indices] =\
                         maximum_position_of_labels(distances, labels, indices)
                max_radius[to_indices] = fix(
                    scind.maximum(distances, labels, indices))
                mean_radius[to_indices] = fix(
                    scind.mean(distances, labels, indices))
                median_radius[to_indices] = median_of_labels(
                    distances, labels, indices)
                #
                # The extent (area / bounding box area)
                #
                mextent[to_indices] = calculate_extents(labels, indices)
                #
                # The perimeter distance
                #
                mperimeters[to_indices] = calculate_perimeters(labels, indices)
                #
                # Solidity
                #
                msolidity[to_indices] = calculate_solidity(labels, indices)
                #
                # Euler number
                #
                euler[to_indices] = euler_number(labels, indices)
                #
                # Zernike features
                #
                zf_l = cpmz.zernike(zernike_numbers, labels, indices)
                for (n, m), z in zip(zernike_numbers, zf_l.transpose()):
                    zf[(n, m)][to_indices] = z
            #
            # Form factor
            #
            ff = 4.0 * np.pi * objects.areas / mperimeters**2
            #
            # Feret diameter
            #
            min_feret_diameter, max_feret_diameter = \
                feret_diameter(chulls, chull_counts, objects.indices)

        else:
            ff = np.zeros(0)

        for f, m in ([(F_AREA, objects.areas), (F_CENTER_X, mcenter_x),
                      (F_CENTER_Y, mcenter_y), (F_EXTENT, mextent),
                      (F_PERIMETER, mperimeters), (F_SOLIDITY, msolidity),
                      (F_FORM_FACTOR, ff), (F_EULER_NUMBER, euler),
                      (F_MAXIMUM_RADIUS, max_radius),
                      (F_MEAN_RADIUS, mean_radius),
                      (F_MEDIAN_RADIUS, median_radius),
                      (F_MIN_FERET_DIAMETER, min_feret_diameter),
                      (F_MAX_FERET_DIAMETER, max_feret_diameter)] +
                     [(self.get_zernike_name((n, m)), zf[(n, m)])
                      for n, m in zernike_numbers]):
            self.record_measurement(workspace, object_name, f, m)
    def measure_zernike(self, pixels, labels, indexes, centers, radius, n, m):
        # I'll put some documentation in here to explain what it does.
        # If someone ever wants to call it, their editor might display
        # the documentation.
        '''Measure the intensity of the image with Zernike (N, M)

        pixels - the intensity image to be measured
        labels - the labels matrix that labels each object with an integer
        indexes - the label #s in the image
        centers - the centers of the minimum enclosing circle for each object
        radius - the radius of the minimum enclosing circle for each object
        n, m - the Zernike coefficients.

        See http://en.wikipedia.org/wiki/Zernike_polynomials for an
        explanation of the Zernike polynomials
        '''
        #
        # The strategy here is to operate on the whole array instead
        # of operating on one object at a time. The most important thing
        # is to avoid having to run the Python interpreter once per pixel
        # in the image and the second most important is to avoid running
        # it per object in case there are hundreds of objects.
        #
        # We play lots of indexing tricks here to operate on the whole image.
        # I'll try to explain some - hopefully, you can reuse.
        center_x = centers[:, 1]
        center_y = centers[:, 0]
        #
        # Make up fake values for 0 (the background). This lets us do our
        # indexing tricks. Really, we're going to ignore the background,
        # but we want to do the indexing without ignoring the background
        # because that's easier.
        #
        center_x = np.hstack([[0], center_x])
        center_y = np.hstack([[0], center_y])
        radius = np.hstack([[1], radius])
        #
        # Now get one array that's the y coordinate of each pixel and one
        # that's the x coordinate. This might look stupid and wasteful,
        # but these "arrays" are never actually realized and made into
        # real memory.
        #
        y, x = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
        #
        # Get the x and y coordinates relative to the object centers.
        # This uses Numpy broadcasting. For each pixel, we use the
        # value in the labels matrix as an index into the appropriate
        # one-dimensional array. So we get the value for that object.
        #
        # Out-of-place subtraction: the mgrid arrays are integer and the
        # centers are floats, so in-place "-=" would fail to cast.
        y = y - center_y[labels]
        x = x - center_x[labels]
        #
        # Zernikes take x and y values from zero to one. We scale the
        # integer coordinate values by dividing them by the radius of
        # the circle. Again, we use the indexing trick to look up the
        # values for each object.
        #
        y = y.astype(float) / radius[labels]
        x = x.astype(float) / radius[labels]
        #
        #################################
        #
        # ZERNIKE POLYNOMIALS
        #
        # Now we can get Zernike polynomials per-pixel where each pixel
        # value is calculated according to its object's MEC.
        #
        # We use a mask of all of the non-zero labels so the calculation
        # runs a little faster.
        #
        zernike_polynomial = construct_zernike_polynomials(
            x, y, np.array([[n, m]]), labels > 0)
        #
        # For historical reasons, CellProfiler didn't multiply by the per-Zernike
        # normalizing factor: (2*n + 2) / E / pi, where E is 2 if m is zero and 1
        # otherwise. We do it here to aid with the reconstruction.
        #
        zernike_polynomial *= (2*n + 2) / (2 if m == 0 else 1) / np.pi
        #
        # Multiply the Zernike polynomial by the image to dissect
        # the image by the Zernike basis set.
        #
        output_pixels = pixels * zernike_polynomial[:,:,0]
        #
        # Finally, we use Scipy to sum the intensities. Scipy has different
        # versions with different quirks. The "fix" function takes all
        # of that into account.
        #
        # The sum function calculates the sum of the pixel values for
        # each pixel in an object, using the labels matrix to name
        # the pixels in an object
        #
        zr = fix(scind.sum(output_pixels.real, labels, indexes))
        zi = fix(scind.sum(output_pixels.imag, labels, indexes))
        #
        # And we're done! Did you like it? Did you get it?
        #
        return zr, zi
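
The indexing trick the comments above describe -- looking up a per-object value for every pixel by using the labels matrix itself as an index -- works for any per-object array once a dummy entry is prepended for the background. A tiny sketch, independent of the Zernike code:

import numpy as np

labels = np.array([[0, 1, 1],
                   [2, 2, 0]])
# One center coordinate per object (objects 1 and 2), plus a throwaway
# entry at index 0 so background pixels index something harmless.
center_y = np.hstack([[0.0], [0.5, 1.0]])

# Fancy indexing: each pixel pulls the value belonging to its own object.
per_pixel_center_y = center_y[labels]
# array([[0. , 0.5, 0.5],
#        [1. , 1. , 0. ]])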
Exemplo n.º 24
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)
        assert isinstance(objects, cpo.Objects)
        has_pixels = objects.areas > 0
        labels = objects.small_removed_segmented
        kept_labels = objects.segmented
        neighbor_objects = workspace.object_set.get_objects(
            self.neighbors_name.value)
        assert isinstance(neighbor_objects, cpo.Objects)
        neighbor_labels = neighbor_objects.small_removed_segmented
        #
        # Need to add in labels touching border.
        #
        unedited_segmented = neighbor_objects.unedited_segmented
        touching_border = np.zeros(np.max(unedited_segmented) + 1, bool)
        touching_border[unedited_segmented[0, :]] = True
        touching_border[unedited_segmented[-1, :]] = True
        touching_border[unedited_segmented[:, 0]] = True
        touching_border[unedited_segmented[:, -1]] = True
        touching_border[0] = False
        touching_border_mask = touching_border[unedited_segmented]
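        #
        # touching_border is indexed by label number, so indexing it with
        # the label image fans it back out to a per-pixel mask: a pixel is
        # True when its label touches the image border. Illustrative
        # example: touching_border = [False, True, False] with
        # unedited_segmented = [[1, 2], [2, 2]] gives
        # [[True, False], [False, False]].
        #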
        nobjects = np.max(labels)
        nkept_objects = objects.count
        nneighbors = np.max(neighbor_labels)
        if np.any(touching_border) and \
           np.all(~ touching_border_mask[neighbor_labels!=0]):
            # Add the border labels if any were excluded
            touching_border_object_number = np.cumsum(touching_border) + \
                np.max(neighbor_labels)
            touching_border_mask = touching_border_mask & (neighbor_labels == 0)
            neighbor_labels = neighbor_labels.copy().astype(np.int32)
            neighbor_labels[touching_border_mask] = touching_border_object_number[
                unedited_segmented[touching_border_mask]]
        
        neighbor_has_pixels = np.bincount(neighbor_labels.ravel())[1:] > 0
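        #
        # np.bincount counts the pixels per label; dropping index 0
        # discards the background. Illustrative example:
        #   np.bincount(np.array([0, 0, 2, 2, 2]))[1:] > 0
        # gives array([False, True]): label 1 has no pixels, label 2 does.
        #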
        
        _, object_numbers = objects.relate_labels(labels, kept_labels)
        if self.neighbors_are_objects:
            neighbor_numbers = object_numbers
        else:
            _, neighbor_numbers = neighbor_objects.relate_labels(
                neighbor_labels, neighbor_objects.segmented)
        neighbor_count = np.zeros((nobjects,))
        pixel_count = np.zeros((nobjects,))
        first_object_number = np.zeros((nobjects,),int)
        second_object_number = np.zeros((nobjects,),int)
        first_x_vector = np.zeros((nobjects,))
        second_x_vector = np.zeros((nobjects,))
        first_y_vector = np.zeros((nobjects,))
        second_y_vector = np.zeros((nobjects,))
        angle = np.zeros((nobjects,))
        percent_touching = np.zeros((nobjects,))
        expanded_labels = None
        if self.distance_method == D_EXPAND:
            # Find the i,j coordinates of the nearest foreground point
            # to every background point
            i,j = scind.distance_transform_edt(labels==0,
                                               return_distances=False,
                                               return_indices=True)
            # Assign each background pixel to the label of its nearest
            # foreground pixel. Assign label to label for foreground.
            labels = labels[i,j]
            expanded_labels = labels  # for display
            distance = 1 # dilate once to make touching edges overlap
            scale = S_EXPANDED
            if self.neighbors_are_objects:
                neighbor_labels = labels.copy()
        elif self.distance_method == D_WITHIN:
            distance = self.distance.value
            scale = str(distance)
        elif self.distance_method == D_ADJACENT:
            distance = 1
            scale = S_ADJACENT
        else:
            raise ValueError("Unknown distance method: %s" %
                             self.distance_method.value)
        if nneighbors > (1 if self.neighbors_are_objects else 0):
            first_objects = []
            second_objects = []
            object_indexes = np.arange(nobjects, dtype=np.int32)+1
            #
            # First, compute the first and second nearest neighbors,
            # and the angles between self and the first and second
            # nearest neighbors
            #
            ocenters = centers_of_labels(
                objects.small_removed_segmented).transpose()
            ncenters = centers_of_labels(
                neighbor_objects.small_removed_segmented).transpose()
            areas = fix(scind.sum(np.ones(labels.shape),labels, object_indexes))
            perimeter_outlines = outline(labels)
            perimeters = fix(scind.sum(
                np.ones(labels.shape), perimeter_outlines, object_indexes))
                                       
            i,j = np.mgrid[0:nobjects,0:nneighbors]
            distance_matrix = np.sqrt((ocenters[i,0] - ncenters[j,0])**2 +
                                      (ocenters[i,1] - ncenters[j,1])**2)
            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            if distance_matrix.shape[1] == 1:
                # a little buggy: np.lexsort treats a 2-d array whose
                # second dimension is 1 as if it were a 1-d array
                order = np.zeros(distance_matrix.shape, int)
            else:
                order = np.lexsort([distance_matrix])
            first_neighbor = 1 if self.neighbors_are_objects else 0
            first_object_index = order[:, first_neighbor]
            first_x_vector = ncenters[first_object_index,1] - ocenters[:,1]
            first_y_vector = ncenters[first_object_index,0] - ocenters[:,0]
            if nneighbors > first_neighbor+1:
                second_object_index = order[:, first_neighbor + 1]
                second_x_vector = ncenters[second_object_index,1] - ocenters[:,1]
                second_y_vector = ncenters[second_object_index,0] - ocenters[:,0]
                v1 = np.array((first_x_vector,first_y_vector))
                v2 = np.array((second_x_vector,second_y_vector))
                #
                # Compute the cosine of the angle between v1 and v2 from
                # their normalized dot product
                #
                dot = (np.sum(v1*v2,0) / 
                       np.sqrt(np.sum(v1**2,0)*np.sum(v2**2,0)))
                angle = np.arccos(dot) * 180. / np.pi
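                #
                # Illustrative example: if the first neighbor lies due east
                # (v1 = (1, 0)) and the second due north (v2 = (0, 1)), then
                # dot = 0 and angle = arccos(0) * 180 / pi = 90 degrees.
                #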
            
            # Make the structuring element for dilation
            strel = strel_disk(distance)
            #
            # A slightly bigger one so the dilation reaches into the border,
            # with a structure that mimics the one used to create the outline
            #
            strel_touching = strel_disk(distance + .5)
            #
            # Get the extents of each object and compute the patch that
            # cuts out the part of the image within "distance" of the
            # object
            i,j = np.mgrid[0:labels.shape[0],0:labels.shape[1]]
            min_i, max_i, min_i_pos, max_i_pos =\
                scind.extrema(i,labels,object_indexes)
            min_j, max_j, min_j_pos, max_j_pos =\
                scind.extrema(j,labels,object_indexes)
            min_i = np.maximum(fix(min_i)-distance,0).astype(int)
            max_i = np.minimum(fix(max_i)+distance+1,labels.shape[0]).astype(int)
            min_j = np.maximum(fix(min_j)-distance,0).astype(int)
            max_j = np.minimum(fix(max_j)+distance+1,labels.shape[1]).astype(int)
            #
            # Loop over all objects.
            # Calculate which ones lie within "distance" of "index"
            # and how much of "index"'s outline they touch
            #
            for object_number in object_numbers:
                if object_number == 0:
                    #
                    # No corresponding object in small-removed. This means
                    # that the object has no pixels, i.e. it was not renumbered.
                    #
                    continue
                index = object_number - 1
                patch = labels[min_i[index]:max_i[index],
                               min_j[index]:max_j[index]]
                npatch = neighbor_labels[min_i[index]:max_i[index],
                                         min_j[index]:max_j[index]]
                #
                # Find the neighbors
                #
                patch_mask = patch==(index+1)
                extended = scind.binary_dilation(patch_mask,strel)
                neighbors = np.unique(npatch[extended])
                neighbors = neighbors[neighbors != 0]
                if self.neighbors_are_objects:
                    neighbors = neighbors[neighbors != object_number]
                nc = len(neighbors)
                neighbor_count[index] = nc
                if nc > 0:
                    first_objects.append(np.ones(nc,int) * object_number)
                    second_objects.append(neighbors)
                if self.neighbors_are_objects:
                    #
                    # Find the # of overlapping pixels. Dilate the neighbors
                    # and see how many pixels overlap our image. Use a 3x3
                    # structuring element to expand the overlapping edge
                    # into the perimeter.
                    #
                    outline_patch = perimeter_outlines[
                        min_i[index]:max_i[index],
                        min_j[index]:max_j[index]] == object_number
                    extended = scind.binary_dilation(
                        (patch != 0) & (patch != object_number), strel_touching)
                    overlap = np.sum(outline_patch & extended)
                    pixel_count[index] = overlap
            if sum([len(x) for x in first_objects]) > 0:
                first_objects = np.hstack(first_objects)
                reverse_object_numbers = np.zeros(
                    max(np.max(object_numbers), np.max(first_objects)) + 1, int)
                reverse_object_numbers[object_numbers] = np.arange(len(object_numbers)) + 1
                first_objects = reverse_object_numbers[first_objects]
    
                second_objects = np.hstack(second_objects)
                reverse_neighbor_numbers = np.zeros(
                    max(np.max(neighbor_numbers), np.max(second_objects)) + 1, int)
                reverse_neighbor_numbers[neighbor_numbers] = np.arange(len(neighbor_numbers)) + 1
                second_objects= reverse_neighbor_numbers[second_objects]
                to_keep = (first_objects > 0) & (second_objects > 0)
                first_objects = first_objects[to_keep]
                second_objects  = second_objects[to_keep]
            else:
                first_objects = np.zeros(0, int)
                second_objects = np.zeros(0, int)
            if self.neighbors_are_objects:
                percent_touching = pixel_count * 100 / perimeters
            else:
                percent_touching = pixel_count * 100.0 / areas
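            #
            # percent_touching is the percentage of an object's perimeter
            # pixels that lie within "distance" of a different object. It is
            # only filled in (and only reported below) when the neighbors
            # are the same object set; otherwise pixel_count stays zero.
            #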
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            #
            # Have to recompute nearest
            #
            first_object_number = np.zeros(nkept_objects, int)
            second_object_number = np.zeros(nkept_objects, int)
            if nkept_objects > (1 if self.neighbors_are_objects else 0):
                di = (ocenters[object_indexes[:, np.newaxis], 0] - 
                      ncenters[neighbor_indexes[np.newaxis, :], 0])
                dj = (ocenters[object_indexes[:, np.newaxis], 1] - 
                      ncenters[neighbor_indexes[np.newaxis, :], 1])
                distance_matrix = np.sqrt(di*di + dj*dj)
                distance_matrix[~ has_pixels, :] = np.inf
                distance_matrix[:, ~neighbor_has_pixels] = np.inf
                #
                # order[:,0] should be arange(nobjects)
                # order[:,1] should be the nearest neighbor
                # order[:,2] should be the next nearest neighbor
                #
                order = np.lexsort([distance_matrix]).astype(
                    first_object_number.dtype)
                if self.neighbors_are_objects:
                    first_object_number[has_pixels] = order[has_pixels,1] + 1
                    if nkept_objects > 2:
                        second_object_number[has_pixels] = order[has_pixels,2] + 1
                else:
                    first_object_number[has_pixels] = order[has_pixels,0] + 1
                    if order.shape[1] > 1:
                        second_object_number[has_pixels] = order[has_pixels,1] + 1
        else:
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            first_objects = np.zeros(0, int)
            second_objects = np.zeros(0, int)
        #
        # Now convert all measurements from the small-removed to
        # the final number set.
        #
        neighbor_count = neighbor_count[object_indexes]
        neighbor_count[~ has_pixels] = 0
        percent_touching = percent_touching[object_indexes]
        percent_touching[~ has_pixels] = 0
        first_x_vector = first_x_vector[object_indexes]
        second_x_vector = second_x_vector[object_indexes]
        first_y_vector = first_y_vector[object_indexes]
        second_y_vector = second_y_vector[object_indexes]
        angle = angle[object_indexes]
        #
        # Record the measurements
        #
        assert(isinstance(workspace, cpw.Workspace))
        m = workspace.measurements
        assert(isinstance(m, cpmeas.Measurements))
        image_set = workspace.image_set
        features_and_data = [
            (M_NUMBER_OF_NEIGHBORS, neighbor_count),
            (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
            (M_FIRST_CLOSEST_DISTANCE, np.sqrt(first_x_vector**2+first_y_vector**2)),
            (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
            (M_SECOND_CLOSEST_DISTANCE, np.sqrt(second_x_vector**2+second_y_vector**2)),
            (M_ANGLE_BETWEEN_NEIGHBORS, angle)]
        if self.neighbors_are_objects:
            features_and_data.append((M_PERCENT_TOUCHING, percent_touching))
        for feature_name, data in features_and_data:
            m.add_measurement(self.object_name.value,
                              self.get_measurement_name(feature_name),
                              data)
        if len(first_objects) > 0:
            m.add_relate_measurement(
                self.module_num, 
                cpmeas.NEIGHBORS,
                self.object_name.value,
                self.object_name.value if self.neighbors_are_objects 
                else self.neighbors_name.value,
                m.image_set_number * np.ones(first_objects.shape, int),
                first_objects,
                m.image_set_number * np.ones(second_objects.shape, int),
                second_objects)
                                 
        labels = kept_labels
        
        neighbor_count_image = np.zeros(labels.shape,int)
        object_mask = objects.segmented != 0
        object_indexes = objects.segmented[object_mask]-1
        neighbor_count_image[object_mask] = neighbor_count[object_indexes]
        workspace.display_data.neighbor_count_image = neighbor_count_image
        
        if self.neighbors_are_objects:
            percent_touching_image = np.zeros(labels.shape)
            percent_touching_image[object_mask] = percent_touching[object_indexes]
            workspace.display_data.percent_touching_image = percent_touching_image
        
        image_set = workspace.image_set
        if self.wants_count_image.value:
            neighbor_cm_name = self.count_colormap.value
            neighbor_cm = get_colormap(neighbor_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap = neighbor_cm)
            img = sm.to_rgba(neighbor_count_image)[:,:,:3]
            img[:,:,0][~ object_mask] = 0
            img[:,:,1][~ object_mask] = 0
            img[:,:,2][~ object_mask] = 0
            count_image = cpi.Image(img, masking_objects = objects)
            image_set.add(self.count_image_name.value, count_image)
        else:
            neighbor_cm_name = cpprefs.get_default_colormap()
            neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
        if self.neighbors_are_objects and self.wants_percent_touching_image:
            percent_touching_cm_name = self.touching_colormap.value
            percent_touching_cm = get_colormap(percent_touching_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap = percent_touching_cm)
            img = sm.to_rgba(percent_touching_image)[:,:,:3]
            img[:,:,0][~ object_mask] = 0
            img[:,:,1][~ object_mask] = 0
            img[:,:,2][~ object_mask] = 0
            touching_image = cpi.Image(img, masking_objects = objects)
            image_set.add(self.touching_image_name.value,
                          touching_image)
        else:
            percent_touching_cm_name = cpprefs.get_default_colormap()
            percent_touching_cm = matplotlib.cm.get_cmap(percent_touching_cm_name)

        if self.show_window:
            workspace.display_data.neighbor_cm_name = neighbor_cm_name
            workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.expanded_labels = expanded_labels
            workspace.display_data.object_mask = object_mask
Example #25
    def run_on_image_setting(self, workspace, image):
        assert isinstance(workspace, cpw.Workspace)
        image_set = workspace.image_set
        measurements = workspace.measurements
        im = image_set.get_image(image.image_name.value,
                                 must_be_grayscale=True)
        #
        # Downsample the image and mask
        #
        new_shape = np.array(im.pixel_data.shape)
        if image.subsample_size.value < 1:
            new_shape = new_shape * image.subsample_size.value
            if im.dimensions == 2:
                i, j = (
                    np.mgrid[0:new_shape[0], 0:new_shape[1]].astype(float) /
                    image.subsample_size.value)
                pixels = scind.map_coordinates(im.pixel_data, (i, j), order=1)
                mask = scind.map_coordinates(im.mask.astype(float),
                                             (i, j)) > .9
            else:
                k, i, j = (np.mgrid[0:new_shape[0], 0:new_shape[1],
                                    0:new_shape[2]].astype(float) /
                           image.subsample_size.value)
                pixels = scind.map_coordinates(im.pixel_data, (k, i, j),
                                               order=1)
                mask = scind.map_coordinates(im.mask.astype(float),
                                             (k, i, j)) > .9
        else:
            pixels = im.pixel_data
            mask = im.mask
        #
        # Remove background pixels using a greyscale tophat filter
        #
        if image.image_sample_size.value < 1:
            back_shape = new_shape * image.image_sample_size.value
            if im.dimensions == 2:
                i, j = (
                    np.mgrid[0:back_shape[0], 0:back_shape[1]].astype(float) /
                    image.image_sample_size.value)
                back_pixels = scind.map_coordinates(pixels, (i, j), order=1)
                back_mask = scind.map_coordinates(mask.astype(float),
                                                  (i, j)) > .9
            else:
                k, i, j = (np.mgrid[0:back_shape[0], 0:back_shape[1],
                                    0:back_shape[2]].astype(float) /
                           image.image_sample_size.value)
                back_pixels = scind.map_coordinates(pixels, (k, i, j), order=1)
                back_mask = scind.map_coordinates(mask.astype(float),
                                                  (k, i, j)) > .9
        else:
            back_pixels = pixels
            back_mask = mask
            back_shape = new_shape
        radius = image.element_size.value
        if im.dimensions == 2:
            selem = skimage.morphology.disk(radius, dtype=bool)
        else:
            selem = skimage.morphology.ball(radius, dtype=bool)
        back_pixels_mask = np.zeros_like(back_pixels)
        back_pixels_mask[back_mask == True] = back_pixels[back_mask == True]
        back_pixels = skimage.morphology.erosion(back_pixels_mask, selem=selem)
        back_pixels_mask = np.zeros_like(back_pixels)
        back_pixels_mask[back_mask == True] = back_pixels[back_mask == True]
        back_pixels = skimage.morphology.dilation(back_pixels_mask,
                                                  selem=selem)
        if image.image_sample_size.value < 1:
            if im.dimensions == 2:
                i, j = np.mgrid[0:new_shape[0], 0:new_shape[1]].astype(float)
                #
                # Make sure the mapping only references the index range of
                # back_pixels.
                #
                i *= float(back_shape[0] - 1) / float(new_shape[0] - 1)
                j *= float(back_shape[1] - 1) / float(new_shape[1] - 1)
                back_pixels = scind.map_coordinates(back_pixels, (i, j),
                                                    order=1)
            else:
                k, i, j = np.mgrid[0:new_shape[0], 0:new_shape[1],
                                   0:new_shape[2]].astype(float)
                k *= float(back_shape[0] - 1) / float(new_shape[0] - 1)
                i *= float(back_shape[1] - 1) / float(new_shape[1] - 1)
                j *= float(back_shape[2] - 1) / float(new_shape[2] - 1)
                back_pixels = scind.map_coordinates(back_pixels, (k, i, j),
                                                    order=1)
        pixels -= back_pixels
        pixels[pixels < 0] = 0
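        #
        # The erosion followed by dilation above is a grayscale opening,
        # i.e. an estimate of the background; subtracting it from the image
        # and clipping at zero is the top-hat filter referred to above.
        #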

        #
        # For each object, build a little record
        #
        class ObjectRecord(object):
            def __init__(self, name):
                self.name = name
                self.labels = workspace.object_set.get_objects(name).segmented
                self.nobjects = np.max(self.labels)
                if self.nobjects != 0:
                    self.range = np.arange(1, np.max(self.labels) + 1)
                    self.labels = self.labels.copy()
                    self.labels[~im.mask] = 0
                    self.current_mean = fix(
                        scind.mean(im.pixel_data, self.labels, self.range))
                    self.start_mean = np.maximum(self.current_mean,
                                                 np.finfo(float).eps)

        object_records = [
            ObjectRecord(ob.objects_name.value) for ob in image.objects
        ]
        #
        # Transcribed from the Matlab module: granspectr function
        #
        # CALCULATES GRANULAR SPECTRUM, ALSO KNOWN AS SIZE DISTRIBUTION,
        # GRANULOMETRY, AND PATTERN SPECTRUM, SEE REF.:
        # J.Serra, Image Analysis and Mathematical Morphology, Vol. 1. Academic Press, London, 1989
        # Maragos,P. "Pattern spectrum and multiscale shape representation", IEEE Transactions on Pattern Analysis and Machine Intelligence, 11, N 7, pp. 701-716, 1989
        # L.Vincent "Granulometries and Opening Trees", Fundamenta Informaticae, 41, No. 1-2, pp. 57-90, IOS Press, 2000.
        # L.Vincent "Morphological Area Opening and Closing for Grayscale Images", Proc. NATO Shape in Picture Workshop, Driebergen, The Netherlands, pp. 197-208, 1992.
        # I.Ravkin, V.Temov "Bit representation techniques and image processing", Applied Informatics, v.14, pp. 41-90, Finances and Statistics, Moskow, 1988 (in Russian)
        # THIS IMPLEMENTATION INSTEAD OF OPENING USES EROSION FOLLOWED BY RECONSTRUCTION
        #
        ng = image.granular_spectrum_length.value
        startmean = np.mean(pixels[mask])
        ero = pixels.copy()
        # Mask the test image so that masked pixels will have no effect
        # during reconstruction
        #
        ero[~mask] = 0
        currentmean = startmean
        startmean = max(startmean, np.finfo(float).eps)

        if im.dimensions == 2:
            footprint = skimage.morphology.disk(1, dtype=bool)
        else:
            footprint = skimage.morphology.ball(1, dtype=bool)
        statistics = [image.image_name.value]
        for i in range(1, ng + 1):
            prevmean = currentmean
            ero_mask = np.zeros_like(ero)
            ero_mask[mask == True] = ero[mask == True]
            ero = skimage.morphology.erosion(ero_mask, selem=footprint)
            rec = skimage.morphology.reconstruction(ero,
                                                    pixels,
                                                    selem=footprint)
            currentmean = np.mean(rec[mask])
            gs = (prevmean - currentmean) * 100 / startmean
            statistics += ["%.2f" % gs]
            feature = image.granularity_feature(i)
            measurements.add_image_measurement(feature, gs)
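            #
            # gs is the percentage of the original mean intensity removed by
            # this erosion/reconstruction step. Illustrative example: with
            # startmean = 0.5, prevmean = 0.4 and currentmean = 0.3,
            # gs = (0.4 - 0.3) * 100 / 0.5 = 20.
            #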
            #
            # Restore the reconstructed image to the shape of the
            # original image so we can match against object labels
            #
            orig_shape = im.pixel_data.shape
            if im.dimensions == 2:
                i, j = np.mgrid[0:orig_shape[0], 0:orig_shape[1]].astype(float)
                #
                # Make sure the mapping only references the index range of
                # rec, which is still at the downsampled shape.
                #
                i *= float(new_shape[0] - 1) / float(orig_shape[0] - 1)
                j *= float(new_shape[1] - 1) / float(orig_shape[1] - 1)
                rec = scind.map_coordinates(rec, (i, j), order=1)
            else:
                k, i, j = np.mgrid[0:orig_shape[0], 0:orig_shape[1],
                                   0:orig_shape[2]].astype(float)
                k *= float(new_shape[0] - 1) / float(orig_shape[0] - 1)
                i *= float(new_shape[1] - 1) / float(orig_shape[1] - 1)
                j *= float(new_shape[2] - 1) / float(orig_shape[2] - 1)
                rec = scind.map_coordinates(rec, (k, i, j), order=1)
            #
            # Calculate the means for the objects
            #
            for object_record in object_records:
                assert isinstance(object_record, ObjectRecord)
                if object_record.nobjects > 0:
                    new_mean = fix(
                        scind.mean(rec, object_record.labels,
                                   object_record.range))
                    gss = ((object_record.current_mean - new_mean) * 100 /
                           object_record.start_mean)
                    object_record.current_mean = new_mean
                else:
                    gss = np.zeros((0, ))
                measurements.add_measurement(object_record.name, feature, gss)
        return statistics
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)
        assert isinstance(objects, cpo.Objects)
        has_pixels = objects.areas > 0
        labels = objects.small_removed_segmented
        kept_labels = objects.segmented
        neighbor_objects = workspace.object_set.get_objects(
            self.neighbors_name.value)
        assert isinstance(neighbor_objects, cpo.Objects)
        neighbor_labels = neighbor_objects.small_removed_segmented
        #
        # Need to add in labels touching border.
        #
        unedited_segmented = neighbor_objects.unedited_segmented
        touching_border = np.zeros(np.max(unedited_segmented) + 1, bool)
        touching_border[unedited_segmented[0, :]] = True
        touching_border[unedited_segmented[-1, :]] = True
        touching_border[unedited_segmented[:, 0]] = True
        touching_border[unedited_segmented[:, -1]] = True
        touching_border[0] = False
        touching_border_mask = touching_border[unedited_segmented]
        nobjects = np.max(labels)
        nkept_objects = objects.count
        nneighbors = np.max(neighbor_labels)
        if np.any(touching_border) and \
                np.all(~ touching_border_mask[neighbor_labels != 0]):
            # Add the border labels if any were excluded
            touching_border_object_number = np.cumsum(touching_border) + \
                                            np.max(neighbor_labels)
            touching_border_mask = touching_border_mask & (neighbor_labels
                                                           == 0)
            neighbor_labels = neighbor_labels.copy().astype(np.int32)
            neighbor_labels[
                touching_border_mask] = touching_border_object_number[
                    unedited_segmented[touching_border_mask]]

        _, object_numbers = objects.relate_labels(labels, kept_labels)
        if self.neighbors_are_objects:
            neighbor_numbers = object_numbers
            neighbor_has_pixels = has_pixels
        else:
            _, neighbor_numbers = neighbor_objects.relate_labels(
                neighbor_labels, neighbor_objects.segmented)
            neighbor_has_pixels = np.bincount(neighbor_labels.ravel())[1:] > 0
        neighbor_count = np.zeros((nobjects, ))
        pixel_count = np.zeros((nobjects, ))
        first_object_number = np.zeros((nobjects, ), int)
        second_object_number = np.zeros((nobjects, ), int)
        first_x_vector = np.zeros((nobjects, ))
        second_x_vector = np.zeros((nobjects, ))
        first_y_vector = np.zeros((nobjects, ))
        second_y_vector = np.zeros((nobjects, ))
        angle = np.zeros((nobjects, ))
        percent_touching = np.zeros((nobjects, ))
        expanded_labels = None
        if self.distance_method == D_EXPAND:
            # Find the i,j coordinates of the nearest foreground point
            # to every background point
            i, j = scind.distance_transform_edt(labels == 0,
                                                return_distances=False,
                                                return_indices=True)
            # Assign each background pixel to the label of its nearest
            # foreground pixel. Assign label to label for foreground.
            labels = labels[i, j]
            expanded_labels = labels  # for display
            distance = 1  # dilate once to make touching edges overlap
            scale = S_EXPANDED
            if self.neighbors_are_objects:
                neighbor_labels = labels.copy()
        elif self.distance_method == D_WITHIN:
            distance = self.distance.value
            scale = str(distance)
        elif self.distance_method == D_ADJACENT:
            distance = 1
            scale = S_ADJACENT
        else:
            raise ValueError("Unknown distance method: %s" %
                             self.distance_method.value)
        if nneighbors > (1 if self.neighbors_are_objects else 0):
            first_objects = []
            second_objects = []
            object_indexes = np.arange(nobjects, dtype=np.int32) + 1
            #
            # First, compute the first and second nearest neighbors,
            # and the angles between self and the first and second
            # nearest neighbors
            #
            ocenters = centers_of_labels(
                objects.small_removed_segmented).transpose()
            ncenters = centers_of_labels(
                neighbor_objects.small_removed_segmented).transpose()
            areas = fix(
                scind.sum(np.ones(labels.shape), labels, object_indexes))
            perimeter_outlines = outline(labels)
            perimeters = fix(
                scind.sum(np.ones(labels.shape), perimeter_outlines,
                          object_indexes))

            i, j = np.mgrid[0:nobjects, 0:nneighbors]
            distance_matrix = np.sqrt((ocenters[i, 0] - ncenters[j, 0])**2 +
                                      (ocenters[i, 1] - ncenters[j, 1])**2)
            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            if distance_matrix.shape[1] == 1:
                # a little buggy: np.lexsort treats a 2-d array whose
                # second dimension is 1 as if it were a 1-d array
                order = np.zeros(distance_matrix.shape, int)
            else:
                order = np.lexsort([distance_matrix])
            first_neighbor = 1 if self.neighbors_are_objects else 0
            first_object_index = order[:, first_neighbor]
            first_x_vector = ncenters[first_object_index, 1] - ocenters[:, 1]
            first_y_vector = ncenters[first_object_index, 0] - ocenters[:, 0]
            if nneighbors > first_neighbor + 1:
                second_object_index = order[:, first_neighbor + 1]
                second_x_vector = ncenters[second_object_index,
                                           1] - ocenters[:, 1]
                second_y_vector = ncenters[second_object_index,
                                           0] - ocenters[:, 0]
                v1 = np.array((first_x_vector, first_y_vector))
                v2 = np.array((second_x_vector, second_y_vector))
                #
                # Compute the cosine of the angle between v1 and v2 from
                # their normalized dot product
                #
                dot = (np.sum(v1 * v2, 0) /
                       np.sqrt(np.sum(v1**2, 0) * np.sum(v2**2, 0)))
                angle = np.arccos(dot) * 180. / np.pi

            # Make the structuring element for dilation
            strel = strel_disk(distance)
            #
            # A slightly bigger one so the dilation reaches into the border,
            # with a structure that mimics the one used to create the outline
            #
            strel_touching = strel_disk(distance + .5)
            #
            # Get the extents of each object and compute the patch that
            # cuts out the part of the image within "distance" of the
            # object
            i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
            min_i, max_i, min_i_pos, max_i_pos = \
                scind.extrema(i, labels, object_indexes)
            min_j, max_j, min_j_pos, max_j_pos = \
                scind.extrema(j, labels, object_indexes)
            min_i = np.maximum(fix(min_i) - distance, 0).astype(int)
            max_i = np.minimum(fix(max_i) + distance + 1,
                               labels.shape[0]).astype(int)
            min_j = np.maximum(fix(min_j) - distance, 0).astype(int)
            max_j = np.minimum(fix(max_j) + distance + 1,
                               labels.shape[1]).astype(int)
            #
            # Loop over all objects.
            # Calculate which ones lie within "distance" of "index"
            # and how much of "index"'s outline they touch
            #
            for object_number in object_numbers:
                if object_number == 0:
                    #
                    # No corresponding object in small-removed. This means
                    # that the object has no pixels, i.e. it was not renumbered.
                    #
                    continue
                index = object_number - 1
                patch = labels[min_i[index]:max_i[index],
                               min_j[index]:max_j[index]]
                npatch = neighbor_labels[min_i[index]:max_i[index],
                                         min_j[index]:max_j[index]]
                #
                # Find the neighbors
                #
                patch_mask = patch == (index + 1)
                extended = scind.binary_dilation(patch_mask, strel)
                neighbors = np.unique(npatch[extended])
                neighbors = neighbors[neighbors != 0]
                if self.neighbors_are_objects:
                    neighbors = neighbors[neighbors != object_number]
                nc = len(neighbors)
                neighbor_count[index] = nc
                if nc > 0:
                    first_objects.append(np.ones(nc, int) * object_number)
                    second_objects.append(neighbors)
                if self.neighbors_are_objects:
                    #
                    # Find the # of overlapping pixels. Dilate the neighbors
                    # and see how many pixels overlap our image. Use a 3x3
                    # structuring element to expand the overlapping edge
                    # into the perimeter.
                    #
                    outline_patch = perimeter_outlines[
                        min_i[index]:max_i[index],
                        min_j[index]:max_j[index]] == object_number
                    extended = scind.binary_dilation(
                        (patch != 0) & (patch != object_number),
                        strel_touching)
                    overlap = np.sum(outline_patch & extended)
                    pixel_count[index] = overlap
            if sum([len(x) for x in first_objects]) > 0:
                first_objects = np.hstack(first_objects)
                reverse_object_numbers = np.zeros(
                    max(np.max(object_numbers), np.max(first_objects)) + 1,
                    int)
                reverse_object_numbers[object_numbers] = np.arange(
                    len(object_numbers)) + 1
                first_objects = reverse_object_numbers[first_objects]

                second_objects = np.hstack(second_objects)
                reverse_neighbor_numbers = np.zeros(
                    max(np.max(neighbor_numbers), np.max(second_objects)) + 1,
                    int)
                reverse_neighbor_numbers[neighbor_numbers] = np.arange(
                    len(neighbor_numbers)) + 1
                second_objects = reverse_neighbor_numbers[second_objects]
                to_keep = (first_objects > 0) & (second_objects > 0)
                first_objects = first_objects[to_keep]
                second_objects = second_objects[to_keep]
            else:
                first_objects = np.zeros(0, int)
                second_objects = np.zeros(0, int)
            if self.neighbors_are_objects:
                percent_touching = pixel_count * 100 / perimeters
            else:
                percent_touching = pixel_count * 100.0 / areas
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            #
            # Have to recompute nearest
            #
            first_object_number = np.zeros(nkept_objects, int)
            second_object_number = np.zeros(nkept_objects, int)
            if nkept_objects > (1 if self.neighbors_are_objects else 0):
                di = (ocenters[object_indexes[:, np.newaxis], 0] -
                      ncenters[neighbor_indexes[np.newaxis, :], 0])
                dj = (ocenters[object_indexes[:, np.newaxis], 1] -
                      ncenters[neighbor_indexes[np.newaxis, :], 1])
                distance_matrix = np.sqrt(di * di + dj * dj)
                distance_matrix[~has_pixels, :] = np.inf
                distance_matrix[:, ~neighbor_has_pixels] = np.inf
                #
                # order[:,0] should be arange(nobjects)
                # order[:,1] should be the nearest neighbor
                # order[:,2] should be the next nearest neighbor
                #
                order = np.lexsort([distance_matrix
                                    ]).astype(first_object_number.dtype)
                if self.neighbors_are_objects:
                    first_object_number[has_pixels] = order[has_pixels, 1] + 1
                    if nkept_objects > 2:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 2] + 1
                else:
                    first_object_number[has_pixels] = order[has_pixels, 0] + 1
                    if order.shape[1] > 1:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 1] + 1
        else:
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            first_objects = np.zeros(0, int)
            second_objects = np.zeros(0, int)
        #
        # Now convert all measurements from the small-removed to
        # the final number set.
        #
        neighbor_count = neighbor_count[object_indexes]
        neighbor_count[~has_pixels] = 0
        percent_touching = percent_touching[object_indexes]
        percent_touching[~has_pixels] = 0
        first_x_vector = first_x_vector[object_indexes]
        second_x_vector = second_x_vector[object_indexes]
        first_y_vector = first_y_vector[object_indexes]
        second_y_vector = second_y_vector[object_indexes]
        angle = angle[object_indexes]
        #
        # Record the measurements
        #
        assert (isinstance(workspace, cpw.Workspace))
        m = workspace.measurements
        assert (isinstance(m, cpmeas.Measurements))
        image_set = workspace.image_set
        features_and_data = [
            (M_NUMBER_OF_NEIGHBORS, neighbor_count),
            (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
            (M_FIRST_CLOSEST_DISTANCE,
             np.sqrt(first_x_vector**2 + first_y_vector**2)),
            (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
            (M_SECOND_CLOSEST_DISTANCE,
             np.sqrt(second_x_vector**2 + second_y_vector**2)),
            (M_ANGLE_BETWEEN_NEIGHBORS, angle)
        ]
        if self.neighbors_are_objects:
            features_and_data.append((M_PERCENT_TOUCHING, percent_touching))
        for feature_name, data in features_and_data:
            m.add_measurement(self.object_name.value,
                              self.get_measurement_name(feature_name), data)
        if len(first_objects) > 0:
            m.add_relate_measurement(
                self.module_num, cpmeas.NEIGHBORS, self.object_name.value,
                self.object_name.value
                if self.neighbors_are_objects else self.neighbors_name.value,
                m.image_set_number * np.ones(first_objects.shape, int),
                first_objects,
                m.image_set_number * np.ones(second_objects.shape, int),
                second_objects)

        labels = kept_labels

        neighbor_count_image = np.zeros(labels.shape, int)
        object_mask = objects.segmented != 0
        object_indexes = objects.segmented[object_mask] - 1
        neighbor_count_image[object_mask] = neighbor_count[object_indexes]
        workspace.display_data.neighbor_count_image = neighbor_count_image

        if self.neighbors_are_objects:
            percent_touching_image = np.zeros(labels.shape)
            percent_touching_image[object_mask] = percent_touching[
                object_indexes]
            workspace.display_data.percent_touching_image = percent_touching_image

        image_set = workspace.image_set
        if self.wants_count_image.value:
            neighbor_cm_name = self.count_colormap.value
            neighbor_cm = get_colormap(neighbor_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=neighbor_cm)
            img = sm.to_rgba(neighbor_count_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            count_image = cpi.Image(img, masking_objects=objects)
            image_set.add(self.count_image_name.value, count_image)
        else:
            neighbor_cm_name = cpprefs.get_default_colormap()
            neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
        if self.neighbors_are_objects and self.wants_percent_touching_image:
            percent_touching_cm_name = self.touching_colormap.value
            percent_touching_cm = get_colormap(percent_touching_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=percent_touching_cm)
            img = sm.to_rgba(percent_touching_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            touching_image = cpi.Image(img, masking_objects=objects)
            image_set.add(self.touching_image_name.value, touching_image)
        else:
            percent_touching_cm_name = cpprefs.get_default_colormap()
            percent_touching_cm = matplotlib.cm.get_cmap(
                percent_touching_cm_name)

        if self.show_window:
            workspace.display_data.neighbor_cm_name = neighbor_cm_name
            workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.expanded_labels = expanded_labels
            workspace.display_data.object_mask = object_mask
Example #27
    def run_image_pair_objects(self, workspace, first_image_name,
                               second_image_name, object_name):
        '''Calculate per-object correlations between intensities in two images'''
        first_image = workspace.image_set.get_image(first_image_name,
                                                    must_be_grayscale=True)
        second_image = workspace.image_set.get_image(second_image_name,
                                                     must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        #
        # Crop both images to the size of the labels matrix
        #
        labels = objects.segmented
        try:
            first_pixels = objects.crop_image_similarly(first_image.pixel_data)
            first_mask = objects.crop_image_similarly(first_image.mask)
        except ValueError:
            first_pixels, m1 = cpo.size_similarly(labels,
                                                  first_image.pixel_data)
            first_mask, m1 = cpo.size_similarly(labels, first_image.mask)
            first_mask[~m1] = False
        try:
            second_pixels = objects.crop_image_similarly(
                second_image.pixel_data)
            second_mask = objects.crop_image_similarly(second_image.mask)
        except ValueError:
            second_pixels, m1 = cpo.size_similarly(labels,
                                                   second_image.pixel_data)
            second_mask, m1 = cpo.size_similarly(labels, second_image.mask)
            second_mask[~m1] = False
        mask = ((labels > 0) & first_mask & second_mask)
        first_pixels = first_pixels[mask]
        second_pixels = second_pixels[mask]
        labels = labels[mask]
        result = []
        first_pixel_data = first_image.pixel_data
        first_mask = first_image.mask
        first_pixel_count = np.prod(first_pixel_data.shape)
        second_pixel_data = second_image.pixel_data
        second_mask = second_image.mask
        second_pixel_count = np.prod(second_pixel_data.shape)
        #
        # Crop the larger image similarly to the smaller one
        #
        if first_pixel_count < second_pixel_count:
            second_pixel_data = first_image.crop_image_similarly(
                second_pixel_data)
            second_mask = first_image.crop_image_similarly(second_mask)
        elif second_pixel_count < first_pixel_count:
            first_pixel_data = second_image.crop_image_similarly(
                first_pixel_data)
            first_mask = second_image.crop_image_similarly(first_mask)
        mask = (first_mask & second_mask & (~np.isnan(first_pixel_data)) &
                (~np.isnan(second_pixel_data)))
        if np.any(mask):
            #
            # Perform the correlation, which returns:
            # [ [ii, ij],
            #   [ji, jj] ]
            #
            fi = first_pixel_data[mask]
            si = second_pixel_data[mask]

        n_objects = objects.count
        # Handle case when both images for the correlation are completely masked out

        if n_objects == 0:
            corr = np.zeros((0, ))
            overlap = np.zeros((0, ))
            K1 = np.zeros((0, ))
            K2 = np.zeros((0, ))
            M1 = np.zeros((0, ))
            M2 = np.zeros((0, ))
            RWC1 = np.zeros((0, ))
            RWC2 = np.zeros((0, ))
            C1 = np.zeros((0, ))
            C2 = np.zeros((0, ))
        elif not np.any(mask):
            corr = np.zeros((n_objects, ))
            corr[:] = np.NaN
            overlap = K1 = K2 = M1 = M2 = RWC1 = RWC2 = C1 = C2 = corr
        else:
            #
            # The correlation is
            # sum((x - mean(x)) * (y - mean(y))) / ((n - 1) * std(x) * std(y))
            #
            lrange = np.arange(n_objects, dtype=np.int32) + 1
            area = fix(scind.sum(np.ones_like(labels), labels, lrange))
            mean1 = fix(scind.mean(first_pixels, labels, lrange))
            mean2 = fix(scind.mean(second_pixels, labels, lrange))
            #
            # Calculate the square root of the sum of squared deviations
            # (the population standard deviation times sqrt(N)).
            #
            std1 = np.sqrt(
                fix(
                    scind.sum((first_pixels - mean1[labels - 1])**2, labels,
                              lrange)))
            std2 = np.sqrt(
                fix(
                    scind.sum((second_pixels - mean2[labels - 1])**2, labels,
                              lrange)))
            x = first_pixels - mean1[labels - 1]  # x - mean(x)
            y = second_pixels - mean2[labels - 1]  # y - mean(y)
            corr = fix(
                scind.sum(x * y / (std1[labels - 1] * std2[labels - 1]),
                          labels, lrange))
            # Explicitly set the correlation to NaN for masked objects
            corr[scind.sum(1, labels, lrange) == 0] = np.NaN
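            #
            # Because std1 and std2 above are square roots of the sums of
            # squared deviations, corr is exactly the per-object Pearson
            # correlation coefficient:
            # r = sum((x - mean(x)) * (y - mean(y))) /
            #     sqrt(sum((x - mean(x))**2) * sum((y - mean(y))**2))
            #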
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Correlation coeff",
                "%.3f" % np.mean(corr)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Correlation coeff",
                           "%.3f" % np.median(corr)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Correlation coeff",
                           "%.3f" % np.min(corr)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Correlation coeff",
                           "%.3f" % np.max(corr)
                       ]]

            # Threshold as percentage of maximum intensity of objects in each channel
            tff = (self.thr.value / 100) * fix(
                scind.maximum(first_pixels, labels, lrange))
            tss = (self.thr.value / 100) * fix(
                scind.maximum(second_pixels, labels, lrange))

            combined_thresh = (first_pixels >= tff[labels - 1]) & (
                second_pixels >= tss[labels - 1])
            fi_thresh = first_pixels[combined_thresh]
            si_thresh = second_pixels[combined_thresh]
            tot_fi_thr = scind.sum(
                first_pixels[first_pixels >= tff[labels - 1]],
                labels[first_pixels >= tff[labels - 1]], lrange)
            tot_si_thr = scind.sum(
                second_pixels[second_pixels >= tss[labels - 1]],
                labels[second_pixels >= tss[labels - 1]], lrange)

            nonZero = (fi > 0) | (si > 0)
            xvar = np.var(fi[nonZero], axis=0, ddof=1)
            yvar = np.var(si[nonZero], axis=0, ddof=1)

            xmean = np.mean(fi[nonZero], axis=0)
            ymean = np.mean(si[nonZero], axis=0)

            z = fi[nonZero] + si[nonZero]
            zvar = np.var(z, axis=0, ddof=1)

            covar = 0.5 * (zvar - (xvar + yvar))

            denom = 2 * covar
            num = (yvar - xvar) + np.sqrt((yvar - xvar) * (yvar - xvar) + 4 *
                                          (covar * covar))
            a = (num / denom)
            b = (ymean - a * xmean)
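            #
            # a and b are the slope and intercept of an orthogonal
            # (total least squares) regression of the second channel on the
            # first, computed from the variances and the covariance
            # (covar above follows from var(x + y) = var(x) + var(y) +
            # 2 * cov(x, y)).
            #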

            i = 1
            while i > 0.003921568627:
                thr_fi_c = i
                thr_si_c = (a * i) + b
                combt = (fi < thr_fi_c) | (si < thr_si_c)
                costReg = scistat.pearsonr(fi[combt], si[combt])
                if costReg[0] <= 0:
                    break
                i = i - 0.003921568627
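            #
            # The search above scans the candidate threshold down from 1 in
            # steps of 1/255 (0.003921568627), pairing it with the
            # corresponding point on the regression line, and stops at the
            # first (highest) threshold at which the pixels falling below
            # either threshold are no longer positively correlated.
            #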

            # Costes' threshold for the entire image is applied to each object
            fi_above_thr = first_pixels > thr_fi_c
            si_above_thr = second_pixels > thr_si_c
            combined_thresh_c = fi_above_thr & si_above_thr
            fi_thresh_c = first_pixels[combined_thresh_c]
            si_thresh_c = second_pixels[combined_thresh_c]
            if np.any(fi_above_thr):
                tot_fi_thr_c = scind.sum(
                    first_pixels[first_pixels >= thr_fi_c],
                    labels[first_pixels >= thr_fi_c], lrange)
            else:
                tot_fi_thr_c = np.zeros(len(lrange))
            if np.any(si_above_thr):
                tot_si_thr_c = scind.sum(
                    second_pixels[second_pixels >= thr_si_c],
                    labels[second_pixels >= thr_si_c], lrange)
            else:
                tot_si_thr_c = np.zeros(len(lrange))

            # Manders Coefficient
            M1 = np.zeros(len(lrange))
            M2 = np.zeros(len(lrange))

            if np.any(combined_thresh):
                M1 = np.array(
                    scind.sum(fi_thresh, labels[combined_thresh],
                              lrange)) / np.array(tot_fi_thr)
                M2 = np.array(
                    scind.sum(si_thresh, labels[combined_thresh],
                              lrange)) / np.array(tot_si_thr)
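            #
            # M1 is the fraction of the first channel's above-threshold
            # intensity that lies in pixels where both channels are above
            # threshold; M2 is the same with the channels swapped.
            #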
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Manders coeff",
                "%.3f" % np.mean(M1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Manders coeff",
                           "%.3f" % np.median(M1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Manders coeff",
                           "%.3f" % np.min(M1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Manders coeff",
                           "%.3f" % np.max(M1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean Manders coeff",
                "%.3f" % np.mean(M2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median Manders coeff",
                           "%.3f" % np.median(M2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min Manders coeff",
                           "%.3f" % np.min(M2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max Manders coeff",
                           "%.3f" % np.max(M2)
                       ]]

            # RWC Coefficient
            RWC1 = np.zeros(len(lrange))
            RWC2 = np.zeros(len(lrange))
            [Rank1] = np.lexsort(([labels], [first_pixels]))
            [Rank2] = np.lexsort(([labels], [second_pixels]))
            Rank1_U = np.hstack(
                [[False], first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]])
            Rank2_U = np.hstack(
                [[False],
                 second_pixels[Rank2[:-1]] != second_pixels[Rank2[1:]]])
            Rank1_S = np.cumsum(Rank1_U)
            Rank2_S = np.cumsum(Rank2_U)
            Rank_im1 = np.zeros(first_pixels.shape, dtype=int)
            Rank_im2 = np.zeros(second_pixels.shape, dtype=int)
            Rank_im1[Rank1] = Rank1_S
            Rank_im2[Rank2] = Rank2_S

            R = max(Rank_im1.max(), Rank_im2.max()) + 1
            Di = abs(Rank_im1 - Rank_im2)
            weight = (R - Di) * 1.0 / R
            weight_thresh = weight[combined_thresh]
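            #
            # Rank_im1 and Rank_im2 are dense intensity ranks (tied values
            # share a rank). The weight (R - Di) / R shrinks towards zero as
            # a pixel's ranks in the two channels diverge, giving the
            # rank-weighted colocalization (RWC) coefficients below.
            #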

            if np.any(combined_thresh):
                RWC1 = np.array(
                    scind.sum(fi_thresh * weight_thresh,
                              labels[combined_thresh],
                              lrange)) / np.array(tot_fi_thr)
                RWC2 = np.array(
                    scind.sum(si_thresh * weight_thresh,
                              labels[combined_thresh],
                              lrange)) / np.array(tot_si_thr)

            result += [[
                first_image_name, second_image_name, object_name,
                "Mean RWC coeff",
                "%.3f" % np.mean(RWC1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median RWC coeff",
                           "%.3f" % np.median(RWC1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min RWC coeff",
                           "%.3f" % np.min(RWC1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max RWC coeff",
                           "%.3f" % np.max(RWC1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean RWC coeff",
                "%.3f" % np.mean(RWC2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median RWC coeff",
                           "%.3f" % np.median(RWC2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min RWC coeff",
                           "%.3f" % np.min(RWC2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max RWC coeff",
                           "%.3f" % np.max(RWC2)
                       ]]

            # Costes Automated Threshold
            C1 = np.zeros(len(lrange))
            C2 = np.zeros(len(lrange))
            if np.any(combined_thresh_c):
                C1 = np.array(
                    scind.sum(fi_thresh_c, labels[combined_thresh_c],
                              lrange)) / np.array(tot_fi_thr_c)
                C2 = np.array(
                    scind.sum(si_thresh_c, labels[combined_thresh_c],
                              lrange)) / np.array(tot_si_thr_c)
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Manders coeff (Costes)",
                "%.3f" % np.mean(C1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Manders coeff (Costes)",
                           "%.3f" % np.median(C1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Manders coeff (Costes)",
                           "%.3f" % np.min(C1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Manders coeff (Costes)",
                           "%.3f" % np.max(C1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean Manders coeff (Costes)",
                "%.3f" % np.mean(C2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median Manders coeff (Costes)",
                           "%.3f" % np.median(C2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min Manders coeff (Costes)",
                           "%.3f" % np.min(C2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max Manders coeff (Costes)",
                           "%.3f" % np.max(C2)
                       ]]

            # Overlap Coefficient
            if np.any(combined_thresh):
                fpsq = scind.sum(first_pixels[combined_thresh]**2,
                                 labels[combined_thresh], lrange)
                spsq = scind.sum(second_pixels[combined_thresh]**2,
                                 labels[combined_thresh], lrange)
                pdt = np.sqrt(np.array(fpsq) * np.array(spsq))

                overlap = fix(
                    scind.sum(
                        first_pixels[combined_thresh] *
                        second_pixels[combined_thresh],
                        labels[combined_thresh], lrange) / pdt)
                K1 = fix((scind.sum(
                    first_pixels[combined_thresh] *
                    second_pixels[combined_thresh], labels[combined_thresh],
                    lrange)) / (np.array(fpsq)))
                K2 = fix(
                    scind.sum(
                        first_pixels[combined_thresh] *
                        second_pixels[combined_thresh],
                        labels[combined_thresh], lrange) / np.array(spsq))
            else:
                overlap = K1 = K2 = np.zeros(len(lrange))
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Overlap coeff",
                "%.3f" % np.mean(overlap)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Overlap coeff",
                           "%.3f" % np.median(overlap)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Overlap coeff",
                           "%.3f" % np.min(overlap)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Overlap coeff",
                           "%.3f" % np.max(overlap)
                       ]]

        measurement = ("Correlation_Correlation_%s_%s" %
                       (first_image_name, second_image_name))
        overlap_measurement = (F_OVERLAP_FORMAT %
                               (first_image_name, second_image_name))
        k_measurement_1 = (F_K_FORMAT % (first_image_name, second_image_name))
        k_measurement_2 = (F_K_FORMAT % (second_image_name, first_image_name))
        manders_measurement_1 = (F_MANDERS_FORMAT %
                                 (first_image_name, second_image_name))
        manders_measurement_2 = (F_MANDERS_FORMAT %
                                 (second_image_name, first_image_name))
        rwc_measurement_1 = (F_RWC_FORMAT %
                             (first_image_name, second_image_name))
        rwc_measurement_2 = (F_RWC_FORMAT %
                             (second_image_name, first_image_name))
        costes_measurement_1 = (F_COSTES_FORMAT %
                                (first_image_name, second_image_name))
        costes_measurement_2 = (F_COSTES_FORMAT %
                                (second_image_name, first_image_name))

        workspace.measurements.add_measurement(object_name, measurement, corr)
        workspace.measurements.add_measurement(object_name,
                                               overlap_measurement, overlap)
        workspace.measurements.add_measurement(object_name, k_measurement_1,
                                               K1)
        workspace.measurements.add_measurement(object_name, k_measurement_2,
                                               K2)
        workspace.measurements.add_measurement(object_name,
                                               manders_measurement_1, M1)
        workspace.measurements.add_measurement(object_name,
                                               manders_measurement_2, M2)
        workspace.measurements.add_measurement(object_name, rwc_measurement_1,
                                               RWC1)
        workspace.measurements.add_measurement(object_name, rwc_measurement_2,
                                               RWC2)
        workspace.measurements.add_measurement(object_name,
                                               costes_measurement_1, C1)
        workspace.measurements.add_measurement(object_name,
                                               costes_measurement_2, C2)

        if n_objects == 0:
            return [[
                first_image_name, second_image_name, object_name,
                "Mean correlation", "-"
            ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Median correlation", "-"
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Min correlation", "-"
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Max correlation", "-"
                    ]]
        else:
            return result
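
# A compact, illustrative sketch (toy data, no ties, single object) of the
# rank-weighted colocalization (RWC) weighting computed above: pixels whose
# intensity ranks agree between the two channels get a weight near 1, and
# strongly disagreeing ranks get a weight near 0. The real module builds a
# dense, tie-aware ranking with np.lexsort instead of argsort-of-argsort.
import numpy as np

first = np.array([0.1, 0.4, 0.9, 0.7])
second = np.array([0.2, 0.3, 0.8, 0.9])
rank1 = np.argsort(np.argsort(first))      # 0, 1, 3, 2
rank2 = np.argsort(np.argsort(second))     # 0, 1, 2, 3
R = max(rank1.max(), rank2.max()) + 1
weight = (R - np.abs(rank1 - rank2)) / float(R)
print(weight)  # -> [1.   1.   0.75 0.75]
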
Exemplo n.º 28
    def make_objskeleton_graph(self, skeleton, skeleton_labels, trunks,
                               branchpoints, endpoints, image):
        '''Make a table that captures the graph relationship of the skeleton

        skeleton - binary skeleton image + outline of seed objects
        skeleton_labels - labels matrix of skeleton
        trunks - binary image with trunk points as 1
        branchpoints - binary image with branchpoints as 1
        endpoints - binary image with endpoints as 1
        image - image for intensity measurement

        returns two tables.
        Table 1: edge table
        The edge table is a numpy record array with the following named
        columns in the following order:
        v1: index into vertex table of first vertex of edge
        v2: index into vertex table of second vertex of edge
        length: # of intermediate pixels + 2 (for two vertices)
        total_intensity: sum of intensities along the edge

        Table 2: vertex table
        The vertex table is a numpy record array:
        i: I coordinate of the vertex
        j: J coordinate of the vertex
        label: the vertex's label
        kind: kind of vertex = "T" for trunk, "B" for branchpoint or "E" for endpoint.
        '''
        i, j = np.mgrid[0:skeleton.shape[0], 0:skeleton.shape[1]]
        #
        # Give each point of interest a unique number
        #
        points_of_interest = trunks | branchpoints | endpoints
        number_of_points = np.sum(points_of_interest)
        #
        # Make up the vertex table
        #
        tbe = np.zeros(points_of_interest.shape, '|S1')
        tbe[trunks] = 'T'
        tbe[branchpoints] = 'B'
        tbe[endpoints] = 'E'
        i_idx = i[points_of_interest]
        j_idx = j[points_of_interest]
        poe_labels = skeleton_labels[points_of_interest]
        tbe = tbe[points_of_interest]
        vertex_table = {
            self.VF_I: i_idx,
            self.VF_J: j_idx,
            self.VF_LABELS: poe_labels,
            self.VF_KIND: tbe
        }
        #
        # First, break the skeleton by removing the branchpoints, endpoints
        # and trunks
        #
        broken_skeleton = skeleton & (~points_of_interest)
        #
        # Label the broken skeleton: this labels each edge differently
        #
        edge_labels, nlabels = morph.label_skeleton(skeleton)
        #
        # Reindex after removing the points of interest
        #
        edge_labels[points_of_interest] = 0
        if nlabels > 0:
            indexer = np.arange(nlabels + 1)
            unique_labels = np.sort(np.unique(edge_labels))
            nlabels = len(unique_labels) - 1
            indexer[unique_labels] = np.arange(len(unique_labels))
            edge_labels = indexer[edge_labels]
            #
            # find magnitudes and lengths for all edges
            #
            magnitudes = fix(
                scind.sum(image, edge_labels,
                          np.arange(1, nlabels + 1, dtype=np.int32)))
            lengths = fix(
                scind.sum(np.ones(edge_labels.shape), edge_labels,
                          np.arange(1, nlabels + 1,
                                    dtype=np.int32))).astype(int)
        else:
            magnitudes = np.zeros(0)
            lengths = np.zeros(0, int)
        #
        # combine the edge labels and indexes of points of interest with padding
        #
        edge_mask = edge_labels != 0
        all_labels = np.zeros(np.array(edge_labels.shape) + 2, int)
        all_labels[1:-1,
                   1:-1][edge_mask] = edge_labels[edge_mask] + number_of_points
        all_labels[i_idx + 1, j_idx + 1] = np.arange(1, number_of_points + 1)
        #
        # Collect all 8 neighbors for each point of interest
        #
        p1 = np.zeros(0, int)
        p2 = np.zeros(0, int)
        for i_off, j_off in ((0, 0), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0),
                             (2, 1), (2, 2)):
            p1 = np.hstack((p1, np.arange(1, number_of_points + 1)))
            p2 = np.hstack((p2, all_labels[i_idx + i_off, j_idx + j_off]))
        #
        # Get rid of zeros which are background
        #
        p1 = p1[p2 != 0]
        p2 = p2[p2 != 0]
        #
        # Find point_of_interest -> point_of_interest connections.
        #
        p1_poi = p1[(p2 <= number_of_points) & (p1 < p2)]
        p2_poi = p2[(p2 <= number_of_points) & (p1 < p2)]
        #
        # Make sure matches are labeled the same
        #
        same_labels = (
            skeleton_labels[i_idx[p1_poi - 1],
                            j_idx[p1_poi -
                                  1]] == skeleton_labels[i_idx[p2_poi - 1],
                                                         j_idx[p2_poi - 1]])
        p1_poi = p1_poi[same_labels]
        p2_poi = p2_poi[same_labels]
        #
        # Find point_of_interest -> edge
        #
        p1_edge = p1[p2 > number_of_points]
        edge = p2[p2 > number_of_points]
        #
        # Each value of `edge` forms a group: all points of interest that
        # touch that edge are connected through it, and pairwise_permutations
        # expands each group into point-to-point pairs. A pair may also touch
        # each other directly without the edge; the shortest connection per
        # pair is kept below and the longer one discarded.
        #
        edge, p1_edge, p2_edge = morph.pairwise_permutations(edge, p1_edge)
        indexer = edge - number_of_points - 1
        lengths = lengths[indexer]
        magnitudes = magnitudes[indexer]
        #
        # OK, now we make the edge table. First poi<->poi. Length = 2,
        # magnitude = magnitude at each point
        #
        poi_length = np.ones(len(p1_poi)) * 2
        poi_magnitude = (image[i_idx[p1_poi - 1], j_idx[p1_poi - 1]] +
                         image[i_idx[p2_poi - 1], j_idx[p2_poi - 1]])
        #
        # Now the edges...
        #
        poi_edge_length = lengths + 2
        poi_edge_magnitude = (image[i_idx[p1_edge - 1], j_idx[p1_edge - 1]] +
                              image[i_idx[p2_edge - 1], j_idx[p2_edge - 1]] +
                              magnitudes)
        #
        # Put together the columns
        #
        v1 = np.hstack((p1_poi, p1_edge))
        v2 = np.hstack((p2_poi, p2_edge))
        lengths = np.hstack((poi_length, poi_edge_length))
        magnitudes = np.hstack((poi_magnitude, poi_edge_magnitude))
        #
        # Sort by p1, p2 and length in order to pick the shortest length
        #
        indexer = np.lexsort((lengths, v1, v2))
        v1 = v1[indexer]
        v2 = v2[indexer]
        lengths = lengths[indexer]
        magnitudes = magnitudes[indexer]
        if len(v1) > 0:
            to_keep = np.hstack(
                ([True], (v1[1:] != v1[:-1]) | (v2[1:] != v2[:-1])))
            v1 = v1[to_keep]
            v2 = v2[to_keep]
            lengths = lengths[to_keep]
            magnitudes = magnitudes[to_keep]
        #
        # Put it all together into a table
        #
        edge_table = {
            self.EF_V1: v1,
            self.EF_V2: v2,
            self.EF_LENGTH: lengths,
            self.EF_TOTAL_INTENSITY: magnitudes
        }
        return edge_table, vertex_table
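
# A minimal, self-contained sketch (not part of the class above) of how an
# edge table shaped like the one returned by make_objskeleton_graph could be
# consumed, e.g. to get the degree of each vertex. The literal "v1"/"v2" keys
# and the toy data are assumptions for illustration only.
import numpy as np

edge_table = {
    "v1": np.array([1, 1, 2]),   # first endpoint of each edge
    "v2": np.array([2, 3, 3]),   # second endpoint of each edge
    "length": np.array([5, 7, 2]),
}
n_vertices = 3
# Each edge contributes one to the degree of both of its endpoints.
degree = (np.bincount(edge_table["v1"], minlength=n_vertices + 1) +
          np.bincount(edge_table["v2"], minlength=n_vertices + 1))[1:]
print(degree)  # -> [2 2 2]
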
Exemplo n.º 29
def minimum(input, labels, index):
    return fix(scind.minimum(input, labels, index))
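
# A quick usage sketch (hypothetical data) for the per-label minimum helper
# above: scipy.ndimage.minimum returns one value per requested label, and
# fix() only normalizes the return type across SciPy versions.
import numpy as np
import scipy.ndimage as scind

values = np.array([[5, 1, 9],
                   [4, 2, 8],
                   [7, 3, 6]], float)
labels = np.array([[1, 1, 2],
                   [1, 1, 2],
                   [0, 2, 2]])
print(scind.minimum(values, labels, [1, 2]))  # -> [1.0, 3.0]
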
Exemplo n.º 30
    def run_on_image_setting(self, workspace, image):
        assert isinstance(workspace, cpw.Workspace)
        image_set = workspace.image_set
        measurements = workspace.measurements
        im = image_set.get_image(image.image_name.value,
                                 must_be_grayscale=True)
        #
        # Downsample the image and mask
        #
        new_shape = np.array(im.pixel_data.shape)
        if image.subsample_size.value < 1:
            new_shape = new_shape * image.subsample_size.value
            if im.dimensions == 2:
                i, j = (np.mgrid[0:new_shape[0], 0:new_shape[1]].astype(float) / image.subsample_size.value)
                pixels = scind.map_coordinates(im.pixel_data, (i, j), order=1)
                mask = scind.map_coordinates(im.mask.astype(float), (i, j)) > .9
            else:
                k, i, j = (np.mgrid[0:new_shape[0], 0:new_shape[1], 0:new_shape[2]].astype(float) / image.subsample_size.value)
                pixels = scind.map_coordinates(im.pixel_data, (k, i, j), order=1)
                mask = scind.map_coordinates(im.mask.astype(float), (k, i, j)) > .9
        else:
            pixels = im.pixel_data
            mask = im.mask
        #
        # Remove background pixels using a greyscale tophat filter
        #
        if image.image_sample_size.value < 1:
            back_shape = new_shape * image.image_sample_size.value
            if im.dimensions == 2:
                i, j = (np.mgrid[0:back_shape[0], 0:back_shape[1]].astype(float) / image.image_sample_size.value)
                back_pixels = scind.map_coordinates(pixels, (i, j), order=1)
                back_mask = scind.map_coordinates(mask.astype(float), (i, j)) > .9
            else:
                k, i, j = (np.mgrid[0:back_shape[0], 0:back_shape[1], 0:back_shape[2]].astype(float) / image.image_sample_size.value)
                back_pixels = scind.map_coordinates(pixels, (k, i, j), order=1)
                back_mask = scind.map_coordinates(mask.astype(float), (k, i, j)) > .9
        else:
            back_pixels = pixels
            back_mask = mask
            back_shape = new_shape
        radius = image.element_size.value
        if im.dimensions == 2:
            selem = skimage.morphology.disk(radius, dtype=bool)
        else:
            selem = skimage.morphology.ball(radius, dtype=bool)
        back_pixels_mask = np.zeros_like(back_pixels)
        back_pixels_mask[back_mask] = back_pixels[back_mask]
        back_pixels = skimage.morphology.erosion(back_pixels_mask, selem=selem)
        back_pixels_mask = np.zeros_like(back_pixels)
        back_pixels_mask[back_mask] = back_pixels[back_mask]
        back_pixels = skimage.morphology.dilation(back_pixels_mask, selem=selem)
        if image.image_sample_size.value < 1:
            if im.dimensions == 2:
                i, j = np.mgrid[0:new_shape[0], 0:new_shape[1]].astype(float)
                #
                # Make sure the mapping only references the index range of
                # back_pixels.
                #
                i *= float(back_shape[0] - 1) / float(new_shape[0] - 1)
                j *= float(back_shape[1] - 1) / float(new_shape[1] - 1)
                back_pixels = scind.map_coordinates(back_pixels, (i, j), order=1)
            else:
                k, i, j = np.mgrid[0:new_shape[0], 0:new_shape[1], 0:new_shape[2]].astype(float)
                k *= float(back_shape[0] - 1) / float(new_shape[0] - 1)
                i *= float(back_shape[1] - 1) / float(new_shape[1] - 1)
                j *= float(back_shape[2] - 1) / float(new_shape[2] - 1)
                back_pixels = scind.map_coordinates(back_pixels, (k, i, j), order=1)
        pixels -= back_pixels
        pixels[pixels < 0] = 0

        #
        # For each object, build a little record
        #
        class ObjectRecord(object):
            def __init__(self, name):
                self.name = name
                self.labels = workspace.object_set.get_objects(name).segmented
                self.nobjects = np.max(self.labels)
                if self.nobjects != 0:
                    self.range = np.arange(1, np.max(self.labels) + 1)
                    self.labels = self.labels.copy()
                    self.labels[~ im.mask] = 0
                    self.current_mean = fix(
                            scind.mean(im.pixel_data,
                                       self.labels,
                                       self.range))
                    self.start_mean = np.maximum(
                            self.current_mean, np.finfo(float).eps)

        object_records = [ObjectRecord(ob.objects_name.value)
                          for ob in image.objects]
        #
        # Transcribed from the Matlab module: granspectr function
        #
        # CALCULATES GRANULAR SPECTRUM, ALSO KNOWN AS SIZE DISTRIBUTION,
        # GRANULOMETRY, AND PATTERN SPECTRUM, SEE REF.:
        # J.Serra, Image Analysis and Mathematical Morphology, Vol. 1. Academic Press, London, 1989
        # Maragos,P. "Pattern spectrum and multiscale shape representation", IEEE Transactions on Pattern Analysis and Machine Intelligence, 11, N 7, pp. 701-716, 1989
        # L.Vincent "Granulometries and Opening Trees", Fundamenta Informaticae, 41, No. 1-2, pp. 57-90, IOS Press, 2000.
        # L.Vincent "Morphological Area Opening and Closing for Grayscale Images", Proc. NATO Shape in Picture Workshop, Driebergen, The Netherlands, pp. 197-208, 1992.
        # I.Ravkin, V.Temov "Bit representation techniques and image processing", Applied Informatics, v.14, pp. 41-90, Finances and Statistics, Moscow, 1988 (in Russian)
        # THIS IMPLEMENTATION INSTEAD OF OPENING USES EROSION FOLLOWED BY RECONSTRUCTION
        #
        ng = image.granular_spectrum_length.value
        startmean = np.mean(pixels[mask])
        ero = pixels.copy()
        # Mask the test image so that masked pixels will have no effect
        # during reconstruction
        #
        ero[~mask] = 0
        currentmean = startmean
        startmean = max(startmean, np.finfo(float).eps)

        if im.dimensions == 2:
            footprint = skimage.morphology.disk(1, dtype=bool)
        else:
            footprint = skimage.morphology.ball(1, dtype=bool)
        statistics = [image.image_name.value]
        for i in range(1, ng + 1):
            prevmean = currentmean
            ero_mask = np.zeros_like(ero)
            ero_mask[mask] = ero[mask]
            ero = skimage.morphology.erosion(ero_mask, selem=footprint)
            rec = skimage.morphology.reconstruction(ero, pixels, selem=footprint)
            currentmean = np.mean(rec[mask])
            gs = (prevmean - currentmean) * 100 / startmean
            statistics += ["%.2f" % gs]
            feature = image.granularity_feature(i)
            measurements.add_image_measurement(feature, gs)
            #
            # Restore the reconstructed image to the shape of the
            # original image so we can match against object labels
            #
            orig_shape = im.pixel_data.shape
            if im.dimensions == 2:
                i, j = np.mgrid[0:orig_shape[0], 0:orig_shape[1]].astype(float)
                #
                # Make sure the mapping only references the index range of
                # back_pixels.
                #
                i *= float(new_shape[0] - 1) / float(orig_shape[0] - 1)
                j *= float(new_shape[1] - 1) / float(orig_shape[1] - 1)
                rec = scind.map_coordinates(rec, (i, j), order=1)
            else:
                k, i, j = np.mgrid[0:orig_shape[0], 0:orig_shape[1], 0:orig_shape[2]].astype(float)
                k *= float(new_shape[0] - 1) / float(orig_shape[0] - 1)
                i *= float(new_shape[1] - 1) / float(orig_shape[1] - 1)
                j *= float(new_shape[2] - 1) / float(orig_shape[2] - 1)
                rec = scind.map_coordinates(rec, (k, i, j), order=1)
            #
            # Calculate the means for the objects
            #
            for object_record in object_records:
                assert isinstance(object_record, ObjectRecord)
                if object_record.nobjects > 0:
                    new_mean = fix(scind.mean(rec, object_record.labels,
                                              object_record.range))
                    gss = ((object_record.current_mean - new_mean) * 100 /
                           object_record.start_mean)
                    object_record.current_mean = new_mean
                else:
                    gss = np.zeros((0,))
                measurements.add_measurement(object_record.name, feature, gss)
        return statistics
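
# A stripped-down sketch of one step of the granular-spectrum loop above:
# erode, reconstruct under the original image, and record the relative drop
# in mean intensity. The random test image and the single iteration are
# assumptions for illustration; the module repeats this ng times, applies
# the mask, and also rescales the result back to the original image shape.
import numpy as np
import skimage.morphology

rng = np.random.RandomState(0)
pixels = rng.uniform(size=(64, 64))
footprint = skimage.morphology.disk(1)

startmean = pixels.mean()
ero = skimage.morphology.erosion(pixels, footprint)
# Reconstruction-by-dilation of the eroded image under the original restores
# everything except the structures removed by the erosion.
rec = skimage.morphology.reconstruction(ero, pixels, method='dilation')
gs = (startmean - rec.mean()) * 100.0 / startmean
print("granular spectrum component: %.2f" % gs)
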
Exemplo n.º 31
    def run(self, workspace):
        parents = workspace.object_set.get_objects(self.parent_name.value)
        children = workspace.object_set.get_objects(self.sub_object_name.value)
        child_count, parents_of = parents.relate_children(children)
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        m.add_measurement(self.sub_object_name.value,
                          FF_PARENT%(self.parent_name.value),
                          parents_of)
        m.add_measurement(self.parent_name.value,
                          FF_CHILDREN_COUNT%(self.sub_object_name.value),
                          child_count)
        good_parents = parents_of[parents_of != 0]
        image_numbers = np.ones(len(good_parents), int) * m.image_set_number
        good_children = np.argwhere(parents_of != 0).flatten() + 1
        if np.any(good_parents):
            m.add_relate_measurement(self.module_num,
                                     R_PARENT, 
                                     self.parent_name.value,
                                     self.sub_object_name.value,
                                     image_numbers,
                                     good_parents,
                                     image_numbers,
                                     good_children)
            m.add_relate_measurement(self.module_num,
                                     R_CHILD, 
                                     self.sub_object_name.value,
                                     self.parent_name.value,
                                     image_numbers,
                                     good_children,
                                     image_numbers,
                                     good_parents)
        parent_names = self.get_parent_names()
        for parent_name in parent_names:
            if self.find_parent_child_distances in (D_BOTH, D_CENTROID):
                self.calculate_centroid_distances(workspace, parent_name)
            if self.find_parent_child_distances in (D_BOTH, D_MINIMUM):
                self.calculate_minimum_distances(workspace, parent_name)

        if self.wants_per_parent_means.value:
            parent_indexes = np.arange(np.max(parents.segmented))+1
            for feature_name in m.get_feature_names(self.sub_object_name.value):
                if not self.should_aggregate_feature(feature_name):
                    continue
                data = m.get_current_measurement(self.sub_object_name.value,
                                                 feature_name)
                if data is not None and len(data) > 0:
                    if len(parents_of) > 0:
                        means = fix(scind.mean(data.astype(float), 
                                               parents_of, parent_indexes))
                    else:
                        means = np.zeros((0,))
                else:
                    # No child measurements - all NaN
                    means = np.ones(len(parents_of)) * np.nan
                mean_feature_name = FF_MEAN%(self.sub_object_name.value,
                                             feature_name)
                m.add_measurement(self.parent_name.value, mean_feature_name,
                                  means)
        
        if self.show_window:
            workspace.display_data.parent_labels = parents.segmented
            workspace.display_data.parent_count = parents.count
            workspace.display_data.child_labels = children.segmented
            workspace.display_data.parents_of = parents_of
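
# A small sketch of the per-parent aggregation trick used above: treating the
# child -> parent mapping as a labels array lets scipy.ndimage.mean average
# the child measurements for each parent in one call. The toy data are
# assumptions for illustration.
import numpy as np
import scipy.ndimage as scind

child_values = np.array([1.0, 2.0, 3.0, 10.0])  # one measurement per child
parents_of = np.array([1, 1, 2, 2])             # parent label of each child
parent_indexes = np.arange(1, parents_of.max() + 1)
print(scind.mean(child_values, parents_of, parent_indexes))  # -> [1.5, 6.5]
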
Exemplo n.º 32
    def run_on_objects(self, object_name, workspace):
        """Run, computing the area measurements for a single map of objects"""
        objects = workspace.get_objects(object_name)

        if len(objects.shape) == 2:
            #
            # Do the ellipse-related measurements
            #
            i, j, l = objects.ijv.transpose()
            centers, eccentricity, major_axis_length, minor_axis_length, \
            theta, compactness = \
                ellipse_from_second_moments_ijv(i, j, 1, l, objects.indices, True)
            del i
            del j
            del l
            self.record_measurement(workspace, object_name, F_ECCENTRICITY,
                                    eccentricity)
            self.record_measurement(workspace, object_name,
                                    F_MAJOR_AXIS_LENGTH, major_axis_length)
            self.record_measurement(workspace, object_name,
                                    F_MINOR_AXIS_LENGTH, minor_axis_length)
            self.record_measurement(workspace, object_name, F_ORIENTATION,
                                    theta * 180 / np.pi)
            self.record_measurement(workspace, object_name, F_COMPACTNESS,
                                    compactness)
            is_first = False
            if len(objects.indices) == 0:
                nobjects = 0
            else:
                nobjects = np.max(objects.indices)
            mcenter_x = np.zeros(nobjects)
            mcenter_y = np.zeros(nobjects)
            mextent = np.zeros(nobjects)
            mperimeters = np.zeros(nobjects)
            msolidity = np.zeros(nobjects)
            euler = np.zeros(nobjects)
            max_radius = np.zeros(nobjects)
            median_radius = np.zeros(nobjects)
            mean_radius = np.zeros(nobjects)
            min_feret_diameter = np.zeros(nobjects)
            max_feret_diameter = np.zeros(nobjects)
            zernike_numbers = self.get_zernike_numbers()
            zf = {}
            for n, m in zernike_numbers:
                zf[(n, m)] = np.zeros(nobjects)
            if nobjects > 0:
                chulls, chull_counts = convex_hull_ijv(objects.ijv,
                                                       objects.indices)
                for labels, indices in objects.get_labels():
                    to_indices = indices - 1
                    distances = distance_to_edge(labels)
                    mcenter_y[to_indices], mcenter_x[to_indices] = \
                        maximum_position_of_labels(distances, labels, indices)
                    max_radius[to_indices] = fix(
                        scind.maximum(distances, labels, indices))
                    mean_radius[to_indices] = fix(
                        scind.mean(distances, labels, indices))
                    median_radius[to_indices] = median_of_labels(
                        distances, labels, indices)
                    #
                    # The extent (area / bounding box area)
                    #
                    mextent[to_indices] = calculate_extents(labels, indices)
                    #
                    # The perimeter distance
                    #
                    mperimeters[to_indices] = calculate_perimeters(
                        labels, indices)
                    #
                    # Solidity
                    #
                    msolidity[to_indices] = calculate_solidity(labels, indices)
                    #
                    # Euler number
                    #
                    euler[to_indices] = euler_number(labels, indices)
                    #
                    # Zernike features
                    #
                    if self.calculate_zernikes.value:
                        zf_l = cpmz.zernike(zernike_numbers, labels, indices)
                        for (n, m), z in zip(zernike_numbers,
                                             zf_l.transpose()):
                            zf[(n, m)][to_indices] = z
                #
                # Form factor
                #
                ff = 4.0 * np.pi * objects.areas / mperimeters**2
                #
                # Feret diameter
                #
                min_feret_diameter, max_feret_diameter = \
                    feret_diameter(chulls, chull_counts, objects.indices)

            else:
                ff = np.zeros(0)

            for f, m in ([(F_AREA, objects.areas), (F_CENTER_X, mcenter_x),
                          (F_CENTER_Y, mcenter_y),
                          (F_CENTER_Z, np.ones_like(mcenter_x)),
                          (F_EXTENT, mextent), (F_PERIMETER, mperimeters),
                          (F_SOLIDITY, msolidity), (F_FORM_FACTOR, ff),
                          (F_EULER_NUMBER, euler),
                          (F_MAXIMUM_RADIUS, max_radius),
                          (F_MEAN_RADIUS, mean_radius),
                          (F_MEDIAN_RADIUS, median_radius),
                          (F_MIN_FERET_DIAMETER, min_feret_diameter),
                          (F_MAX_FERET_DIAMETER, max_feret_diameter)] +
                         [(self.get_zernike_name((n, m)), zf[(n, m)])
                          for n, m in zernike_numbers]):
                self.record_measurement(workspace, object_name, f, m)
        else:
            labels = objects.segmented

            props = skimage.measure.regionprops(labels)

            # Area
            areas = [prop.area for prop in props]

            self.record_measurement(workspace, object_name, F_AREA, areas)

            # Extent
            extents = [prop.extent for prop in props]

            self.record_measurement(workspace, object_name, F_EXTENT, extents)

            # Centers of mass
            centers = objects.center_of_mass()

            center_z, center_y, center_x = centers.transpose()

            self.record_measurement(workspace, object_name, F_CENTER_X,
                                    center_x)

            self.record_measurement(workspace, object_name, F_CENTER_Y,
                                    center_y)

            self.record_measurement(workspace, object_name, F_CENTER_Z,
                                    center_z)

            # Perimeters
            perimeters = []

            for label in np.unique(labels):
                if label == 0:
                    continue

                volume = np.zeros_like(labels, dtype='bool')

                volume[labels == label] = True

                verts, faces, _, _ = skimage.measure.marching_cubes(
                    volume,
                    spacing=objects.parent_image.spacing
                    if objects.has_parent_image else (1.0, ) * labels.ndim,
                    level=0)

                perimeters += [skimage.measure.mesh_surface_area(verts, faces)]

            if len(perimeters) == 0:
                self.record_measurement(workspace, object_name, F_PERIMETER,
                                        [0])
            else:
                self.record_measurement(workspace, object_name, F_PERIMETER,
                                        perimeters)

            for feature in self.get_feature_names():
                if feature in [
                        F_AREA, F_EXTENT, F_CENTER_X, F_CENTER_Y, F_CENTER_Z,
                        F_PERIMETER
                ]:
                    continue

                self.record_measurement(workspace, object_name, feature,
                                        [np.nan])
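
# A hedged sanity check of the form-factor formula used above,
# ff = 4 * pi * area / perimeter**2, which is 1 for an ideal circle and
# smaller for less compact shapes. regionprops' perimeter is a digital
# approximation, so the printed value is only close to 1.
import numpy as np
import skimage.measure
import skimage.morphology

labels = skimage.measure.label(skimage.morphology.disk(50))
prop = skimage.measure.regionprops(labels)[0]
form_factor = 4.0 * np.pi * prop.area / prop.perimeter ** 2
print("%.3f" % form_factor)  # roughly 1 for a circular object
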
Exemplo n.º 33
    def run(self, workspace):
        '''Run the algorithm on one image set'''
        #
        # Get the image as a binary image
        #
        image_set = workspace.image_set
        image = image_set.get_image(self.image_name.value, must_be_binary=True)
        mask = image.pixel_data
        if image.has_mask:
            mask = mask & image.mask
        angle_count = self.angle_count.value
        #
        # We collect the i,j and angle of pairs of points that
        # are 3-d adjacent after erosion.
        #
        # i - the i coordinate of each point found after erosion
        # j - the j coordinate of each point found after erosion
        # a - the angle of the structuring element for each point found
        #
        i = np.zeros(0, int)
        j = np.zeros(0, int)
        a = np.zeros(0, int)

        ig, jg = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
        this_idx = 0
        for angle_number in range(angle_count):
            angle = float(angle_number) * np.pi / float(angle_count)
            strel = self.get_diamond(angle)
            erosion = binary_erosion(mask, strel)
            #
            # Accumulate the count, i, j and angle for all foreground points
            # in the erosion
            #
            this_count = np.sum(erosion)
            i = np.hstack((i, ig[erosion]))
            j = np.hstack((j, jg[erosion]))
            a = np.hstack((a, np.ones(this_count, float) * angle))
        #
        # Find connections based on distances, not adjacency
        #
        first, second = self.find_adjacent_by_distance(i, j, a)
        #
        # Do all connected components.
        #
        if len(first) > 0:
            ij_labels = all_connected_components(first, second) + 1
            nlabels = np.max(ij_labels)
            label_indexes = np.arange(1, nlabels + 1)
            #
            # Compute the measurements
            #
            center_x = fix(mean_of_labels(j, ij_labels, label_indexes))
            center_y = fix(mean_of_labels(i, ij_labels, label_indexes))
            #
            # The angles are weirdly complicated because of the wrap-around.
            # You can imagine some horrible cases, like a circular patch of
            # "worm" in which all angles are represented or a gentle "U"
            # curve.
            #
            # For now, I'm going to use the following heuristic:
            #
            # Compute two different "angles". The angles of one go
            # from 0 to 180 and the angles of the other go from -90 to 90.
            # Take the variance of these from the mean and
            # choose the representation with the lowest variance.
            #
            # An alternative would be to compute the variance at each possible
            # dividing point. Another alternative would be to actually trace through
            # the connected components - both overkill for such an inconsequential
            # measurement I hope.
            #
            angles = fix(mean_of_labels(a, ij_labels, label_indexes))
            vangles = fix(
                mean_of_labels((a - angles[ij_labels - 1])**2, ij_labels,
                               label_indexes))
            aa = a.copy()
            aa[a > np.pi / 2] -= np.pi
            aangles = fix(mean_of_labels(aa, ij_labels, label_indexes))
            vaangles = fix(
                mean_of_labels((aa - aangles[ij_labels - 1])**2, ij_labels,
                               label_indexes))
            aangles[aangles < 0] += np.pi
            angles[vaangles < vangles] = aangles[vaangles < vangles]
            #
            # Squish the labels to 2-d. The labels for overlaps are arbitrary.
            #
            labels = np.zeros(mask.shape, int)
            labels[i, j] = ij_labels
        else:
            center_x = np.zeros(0, int)
            center_y = np.zeros(0, int)
            angles = np.zeros(0)
            nlabels = 0
            label_indexes = np.zeros(0, int)
            labels = np.zeros(mask.shape, int)

        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        object_name = self.object_name.value
        m.add_measurement(object_name, I.M_LOCATION_CENTER_X, center_x)
        m.add_measurement(object_name, I.M_LOCATION_CENTER_Y, center_y)
        m.add_measurement(object_name, M_ANGLE, angles * 180 / np.pi)
        m.add_measurement(object_name, I.M_NUMBER_OBJECT_NUMBER, label_indexes)
        m.add_image_measurement(I.FF_COUNT % object_name, nlabels)
        #
        # Make the objects
        #
        object_set = workspace.object_set
        assert isinstance(object_set, cpo.ObjectSet)
        objects = cpo.Objects()
        objects.segmented = labels
        objects.parent_image = image
        object_set.add_objects(objects, object_name)
        if self.show_window:
            workspace.display_data.i = center_y
            workspace.display_data.j = center_x
            workspace.display_data.angle = angles
            workspace.display_data.mask = mask
            workspace.display_data.labels = labels
            workspace.display_data.count = nlabels
    def measure_zernike(self, pixels, labels, indexes, centers, radius, n, m):
        # I'll put some documentation in here to explain what it does.
        # If someone ever wants to call it, their editor might display
        # the documentation.
        '''Measure the intensity of the image with Zernike (N, M)

        pixels - the intensity image to be measured
        labels - the labels matrix that labels each object with an integer
        indexes - the label #s in the image
        centers - the centers of the minimum enclosing circle for each object
        radius - the radius of the minimum enclosing circle for each object
        n, m - the Zernike coefficients.

        See http://en.wikipedia.org/wiki/Zernike_polynomials for an
        explanation of the Zernike polynomials
        '''
        #
        # The strategy here is to operate on the whole array instead
        # of operating on one object at a time. The most important thing
        # is to avoid having to run the Python interpreter once per pixel
        # in the image and the second most important is to avoid running
        # it per object in case there are hundreds of objects.
        #
        # We play lots of indexing tricks here to operate on the whole image.
        # I'll try to explain some - hopefully, you can reuse.
        center_x = centers[:, 1]
        center_y = centers[:, 0]
        #
        # Make up fake values for 0 (the background). This lets us do our
        # indexing tricks. Really, we're going to ignore the background,
        # but we want to do the indexing without ignoring the background
        # because that's easier.
        #
        center_x = np.hstack([[0], center_x])
        center_y = np.hstack([[0], center_y])
        radius = np.hstack([[1], radius])
        #
        # Now get one array that's the y coordinate of each pixel and one
        # that's the x coordinate. This might look stupid and wasteful,
        # but these "arrays" are never actually realized and made into
        # real memory.
        #
        y, x = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
        #
        # Get the x and y coordinates relative to the object centers.
        # This uses Numpy broadcasting. For each pixel, we use the
        # value in the labels matrix as an index into the appropriate
        # one-dimensional array. So we get the value for that object.
        #
        y -= center_y[labels]
        x -= center_x[labels]
        #
        # Zernikes take x and y values from zero to one. We scale the
        # integer coordinate values by dividing them by the radius of
        # the circle. Again, we use the indexing trick to look up the
        # values for each object.
        #
        y = y.astype(float) / radius[labels]
        x = x.astype(float) / radius[labels]
        #
        #################################
        #
        # ZERNIKE POLYNOMIALS
        #
        # Now we can get Zernike polynomials per-pixel where each pixel
        # value is calculated according to its object's MEC.
        #
        # We use a mask of all of the non-zero labels so the calculation
        # runs a little faster.
        #
        zernike_polynomial = construct_zernike_polynomials(
            x, y, np.array([[n, m]]), labels > 0)
        #
        # For historical reasons, CellProfiler didn't multiply by the per-Zernike
        # normalizing factor: (2*n + 2) / E / pi where E is 2 if m is zero and 1
        # otherwise. We do it here to aid with the reconstruction
        #
        zernike_polynomial *= (2 * n + 2) / (2 if m == 0 else 1) / np.pi
        #
        # Multiply the Zernike polynomial by the image to dissect
        # the image by the Zernike basis set.
        #
        output_pixels = pixels * zernike_polynomial[:, :, 0]
        #
        # Finally, we use Scipy to sum the intensities. Scipy has different
        # versions with different quirks. The "fix" function takes all
        # of that into account.
        #
        # The sum function calculates the sum of the pixel values for
        # each pixel in an object, using the labels matrix to name
        # the pixels in an object
        #
        zr = fix(scind.sum(output_pixels.real, labels, indexes))
        zi = fix(scind.sum(output_pixels.imag, labels, indexes))
        #
        # And we're done! Did you like it? Did you get it?
        #
        return zr, zi
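
# A tiny sketch of the indexing trick the method above leans on: prepend a
# dummy entry for the background label 0, then `per_object[labels]` expands a
# per-object array to a per-pixel array in one vectorized step. The toy labels
# matrix and centers are assumptions for illustration.
import numpy as np

labels = np.array([[0, 1, 1],
                   [2, 2, 0]])
center_x = np.array([1.5, 0.5])         # one x-center per object (labels 1, 2)
center_x = np.hstack([[0], center_x])   # fake entry for the background label
x = np.arange(labels.shape[1])[np.newaxis, :] - center_x[labels]
print(x)  # per-pixel x offset from the owning object's center
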
Exemplo n.º 35
def cooccurrence(quantized_image, labels, scale_i=3, scale_j=0):
    """Calculates co-occurrence matrices for all the objects in the image.

    Return an array P of shape (nobjects, nlevels, nlevels) such that
    P[o, :, :] is the cooccurence matrix for object o.

    quantized_image -- a numpy array of integer type
    labels          -- a numpy array of integer type
    scale_i, scale_j -- integer offsets between the paired pixels

    For each object O, the cooccurrence matrix is defined as follows.
    Given a row number I in the matrix, let A be the set of pixels in
    O with gray level I, excluding pixels in the rightmost S
    columns of the image.  Let B be the set of pixels in O that are S
    pixels to the right of a pixel in A.  Row I of the cooccurence
    matrix is the gray-level histogram of the pixels in B.
    """
    labels = labels.astype(int)
    nlevels = quantized_image.max() + 1
    nobjects = labels.max()
    if scale_i < 0:
        scale_i = -scale_i
        scale_j = -scale_j
    if scale_i == 0 and scale_j > 0:
        image_a = quantized_image[:, :-scale_j]
        image_b = quantized_image[:, scale_j:]
        labels_ab = labels_a = labels[:, :-scale_j]
        labels_b = labels[:, scale_j:]
    elif scale_i > 0 and scale_j == 0:
        image_a = quantized_image[:-scale_i, :]
        image_b = quantized_image[scale_i:, :]
        labels_ab = labels_a = labels[:-scale_i, :]
        labels_b = labels[scale_i:, :]
    elif scale_i > 0 and scale_j > 0:
        image_a = quantized_image[:-scale_i, :-scale_j]
        image_b = quantized_image[scale_i:, scale_j:]
        labels_ab = labels_a = labels[:-scale_i, :-scale_j]
        labels_b = labels[scale_i:, scale_j:]
    else:
        # scale_j should be negative
        image_a = quantized_image[:-scale_i, -scale_j:]
        image_b = quantized_image[scale_i:, :scale_j]
        labels_ab = labels_a = labels[:-scale_i, -scale_j:]
        labels_b = labels[scale_i:, :scale_j]
    equilabel = ((labels_a == labels_b) & (labels_a > 0))
    if np.any(equilabel):

        Q = (nlevels * nlevels * (labels_ab[equilabel] - 1) +
             nlevels * image_a[equilabel] + image_b[equilabel])
        R = np.bincount(Q)
        if R.size != nobjects * nlevels * nlevels:
            S = np.zeros(nobjects * nlevels * nlevels - R.size)
            R = np.hstack((R, S))
        P = R.reshape(nobjects, nlevels, nlevels)
        pixel_count = fix(
            scind.sum(equilabel, labels_ab,
                      np.arange(nobjects, dtype=np.int32) + 1))
        pixel_count = np.tile(pixel_count[:, np.newaxis, np.newaxis],
                              (1, nlevels, nlevels))
        return (P.astype(float) / pixel_count.astype(float), nlevels)
    else:
        return np.zeros((nobjects, nlevels, nlevels)), nlevels
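
# A minimal, assumed example of what the function above computes: for a single
# object covering the whole image and a shift of one pixel to the right
# (scale_i=0, scale_j=1), the co-occurrence matrix counts gray-level pairs
# (left pixel, right pixel), normalized by the number of in-object pairs.
import numpy as np

quantized = np.array([[0, 1, 1],
                      [1, 0, 1]])
nlevels = 2
left, right = quantized[:, :-1], quantized[:, 1:]
P = np.zeros((nlevels, nlevels))
for a, b in zip(left.ravel(), right.ravel()):
    P[a, b] += 1
P /= P.sum()
print(P)  # -> [[0.   0.5 ], [0.25 0.25]]
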
Exemplo n.º 36
 def run(self, workspace):
     '''Run the module on an image set'''
     
     object_name = self.object_name.value
     remaining_object_name = self.remaining_objects.value
     original_objects = workspace.object_set.get_objects(object_name)
     
     if self.mask_choice == MC_IMAGE:
         mask = workspace.image_set.get_image(self.masking_image.value,
                                              must_be_binary = True)
         mask = mask.pixel_data
     else:
         masking_objects = workspace.object_set.get_objects(
             self.masking_objects.value)
         mask = masking_objects.segmented > 0
     if self.wants_inverted_mask:
         mask = ~mask
     #
     # Load the labels
     #
     labels = original_objects.segmented.copy()
     nobjects = np.max(labels)
     #
     # Resize the mask to cover the objects
     #
     mask, m1 = cpo.size_similarly(labels, mask)
     mask[~m1] = False
     #
     # Apply the mask according to the overlap choice.
     #
     if nobjects == 0:
         pass
     elif self.overlap_choice == P_MASK:
         labels = labels * mask
     else:
         pixel_counts = fix(scind.sum(mask, labels, 
                                      np.arange(1, nobjects+1,dtype=np.int32)))
         if self.overlap_choice == P_KEEP:
             keep = pixel_counts > 0
         else:
             total_pixels = fix(scind.sum(np.ones(labels.shape), labels,
                                          np.arange(1, nobjects+1,dtype=np.int32)))
             if self.overlap_choice == P_REMOVE:
                 keep = pixel_counts == total_pixels
             elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                 fraction = self.overlap_fraction.value
                 keep = pixel_counts / total_pixels >= fraction
             else:
                  raise NotImplementedError("Unknown overlap-handling choice: %s" %
                                            self.overlap_choice.value)
         keep = np.hstack(([False], keep))
         labels[~ keep[labels]] = 0
     #
     # Renumber the labels matrix if requested
     #
     if self.retain_or_renumber == R_RENUMBER:
         unique_labels = np.unique(labels[labels!=0])
         indexer = np.zeros(nobjects+1, int)
         indexer[unique_labels] = np.arange(1, len(unique_labels)+1)
         labels = indexer[labels]
         parent_objects = unique_labels
     else:
         parent_objects = np.arange(1, nobjects+1)
     #
     # Add the objects
     #
     remaining_objects = cpo.Objects()
     remaining_objects.segmented = labels
     remaining_objects.unedited_segmented = original_objects.unedited_segmented
     workspace.object_set.add_objects(remaining_objects, 
                                      remaining_object_name)
     #
     # Add measurements
     #
     m = workspace.measurements
     m.add_measurement(remaining_object_name,
                       I.FF_PARENT % object_name,
                       parent_objects)
     if np.max(original_objects.segmented) == 0:
         child_count = np.array([],int)
     else:
         child_count = fix(scind.sum(labels, original_objects.segmented,
                                     np.arange(1, nobjects+1,dtype=np.int32)))
         child_count = (child_count > 0).astype(int)
     m.add_measurement(object_name,
                       I.FF_CHILDREN_COUNT % remaining_object_name,
                       child_count)
     if self.retain_or_renumber == R_RETAIN:
         remaining_object_count = nobjects
     else:
         remaining_object_count = len(unique_labels)
     I.add_object_count_measurements(m, remaining_object_name,
                                     remaining_object_count)
     I.add_object_location_measurements(m, remaining_object_name, labels)
     #
     # Add an outline if asked to do so
     #
     if self.wants_outlines.value:
         outline_image = cpi.Image(outline(labels) > 0,
                                   parent_image = original_objects.parent_image)
         workspace.image_set.add(self.outlines_name.value, outline_image)
     #
     # Save the input, mask and output images for display
     #
     if self.show_window:
         workspace.display_data.original_labels = original_objects.segmented
         workspace.display_data.final_labels = labels
         workspace.display_data.mask = mask
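
# A short sketch of the renumbering idiom used above: build an indexer that
# maps each surviving old label to a compact new label, then apply it with
# fancy indexing. The toy labels matrix is an assumption for illustration.
import numpy as np

labels = np.array([[0, 2, 2],
                   [5, 0, 2]])
nobjects = 5
unique_labels = np.unique(labels[labels != 0])   # -> [2, 5]
indexer = np.zeros(nobjects + 1, int)
indexer[unique_labels] = np.arange(1, len(unique_labels) + 1)
print(indexer[labels])  # -> [[0 1 1], [2 0 1]]
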
Exemplo n.º 37
    def run_image_pair_objects(self, workspace, first_image_name,
                               second_image_name, object_name):
        '''Calculate per-object correlations between intensities in two images'''
        first_image = workspace.image_set.get_image(first_image_name,
                                                    must_be_grayscale=True)
        second_image = workspace.image_set.get_image(second_image_name,
                                                     must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        #
        # Crop both images to the size of the labels matrix
        #
        labels = objects.segmented
        try:
            first_pixels  = objects.crop_image_similarly(first_image.pixel_data)
            first_mask    = objects.crop_image_similarly(first_image.mask)
        except ValueError:
            first_pixels, m1 = cpo.size_similarly(labels, first_image.pixel_data)
            first_mask, m1 = cpo.size_similarly(labels, first_image.mask)
            first_mask[~m1] = False
        try:
            second_pixels = objects.crop_image_similarly(second_image.pixel_data)
            second_mask   = objects.crop_image_similarly(second_image.mask)
        except ValueError:
            second_pixels, m1 = cpo.size_similarly(labels, second_image.pixel_data)
            second_mask, m1 = cpo.size_similarly(labels, second_image.mask)
            second_mask[~m1] = False
        mask   = ((labels > 0) & first_mask & second_mask)
        first_pixels = first_pixels[mask]
        second_pixels = second_pixels[mask]
        labels = labels[mask]
        result = []
        first_pixel_data = first_image.pixel_data
        first_mask = first_image.mask
        first_pixel_count = np.prod(first_pixel_data.shape)
        second_pixel_data = second_image.pixel_data
        second_mask = second_image.mask
        second_pixel_count = np.prod(second_pixel_data.shape)
        #
        # Crop the larger image similarly to the smaller one
        #
        if first_pixel_count < second_pixel_count:
            second_pixel_data = first_image.crop_image_similarly(second_pixel_data)
            second_mask = first_image.crop_image_similarly(second_mask)
        elif second_pixel_count < first_pixel_count:
            first_pixel_data = second_image.crop_image_similarly(first_pixel_data)
            first_mask = second_image.crop_image_similarly(first_mask)
        mask = (first_mask & second_mask &
                (~ np.isnan(first_pixel_data)) &
                (~ np.isnan(second_pixel_data)))
        if np.any(mask):
            #
            # Keep the unmasked, finite pixel values from both images;
            # they are used below for the Costes automated threshold.
            #
            fi = first_pixel_data[mask]
            si = second_pixel_data[mask]
                            
        n_objects = objects.count
        if n_objects == 0:
            corr = np.zeros((0,))
            overlap = np.zeros((0,))
            K1 = np.zeros((0,))
            K2 = np.zeros((0,))
            M1 = np.zeros((0,))
            M2 = np.zeros((0,))
            RWC1 = np.zeros((0,))
            RWC2 = np.zeros((0,))
            C1 = np.zeros((0,))
            C2 = np.zeros((0,))
        else:
            #
            # The correlation is sum((x - mean(x)) * (y - mean(y))) /
            #                    ((n - 1) * std(x) * std(y))
            #
            lrange = np.arange(n_objects,dtype=np.int32)+1
            area  = fix(scind.sum(np.ones_like(labels), labels, lrange))
            mean1 = fix(scind.mean(first_pixels, labels, lrange))
            mean2 = fix(scind.mean(second_pixels, labels, lrange))
            #
            # Calculate the standard deviation times the population.
            #
            std1 = np.sqrt(fix(scind.sum((first_pixels-mean1[labels-1])**2,
                                         labels, lrange)))
            std2 = np.sqrt(fix(scind.sum((second_pixels-mean2[labels-1])**2,
                                         labels, lrange)))
            x = first_pixels - mean1[labels-1]  # x - mean(x)
            y = second_pixels - mean2[labels-1] # y - mean(y)
            corr = fix(scind.sum(x * y / (std1[labels-1] * std2[labels-1]),
                                 labels, lrange))
            # Explicitly set the correlation to NaN for masked objects
            corr[scind.sum(1, labels, lrange) == 0] = np.NaN
            result += [[first_image_name, second_image_name, object_name,"Mean Correlation coeff","%.3f"%np.mean(corr)],
                       [first_image_name, second_image_name, object_name,"Median Correlation coeff","%.3f"%np.median(corr)],
                       [first_image_name, second_image_name, object_name,"Min Correlation coeff","%.3f"%np.min(corr)],
                       [first_image_name, second_image_name, object_name,"Max Correlation coeff","%.3f"%np.max(corr)]]

            # Threshold as percentage of maximum intensity of objects in each channel
            tff = (self.thr.value/100) * fix(scind.maximum(first_pixels,labels, lrange))
            tss = (self.thr.value/100) * fix(scind.maximum(second_pixels,labels, lrange))
        
            combined_thresh = (first_pixels > tff[labels-1]) & (second_pixels > tss[labels-1])
            fi_thresh = first_pixels[combined_thresh]
            si_thresh = second_pixels[combined_thresh]
            tot_fi_thr = scind.sum(first_pixels[first_pixels > tff[labels-1]], labels[first_pixels > tff[labels-1]],lrange)
            tot_si_thr = scind.sum(second_pixels[second_pixels > tss[labels-1]], labels[second_pixels > tss[labels-1]],lrange)
            
            
            nonZero = (fi > 0) | (si > 0)
            xvar = np.var(fi[nonZero],axis=0,ddof=1)
            yvar = np.var(si[nonZero],axis=0,ddof=1)
            
            xmean = np.mean(fi[nonZero], axis=0)
            ymean = np.mean(si[nonZero], axis=0)
            
            z = fi[nonZero] + si[nonZero]
            zvar = np.var(z,axis=0,ddof=1)
            
            covar = 0.5 * (zvar - (xvar+yvar))
            
            denom = 2 * covar
            num = (yvar-xvar) + np.sqrt((yvar-xvar)*(yvar-xvar)+4*(covar*covar))
            a = (num/denom)
            b = (ymean - a*xmean)
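            #
            # Costes automated threshold: step the candidate threshold for the
            # first channel down from 1 in increments of 1/255 (the constant
            # 0.003921568627 below). The second channel's threshold follows
            # the orthogonal-regression line fitted above (thr_si = a*i + b).
            # The scan stops at the first threshold for which the pixels that
            # fall below threshold in at least one channel are no longer
            # positively correlated.
            #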
            
            i = 1
            while i > 0.003921568627:
                thr_fi_c = i
                thr_si_c = (a * i) + b
                combt = (fi < thr_fi_c) | (si < thr_si_c)
                costReg = scistat.pearsonr(fi[combt], si[combt])
                if costReg[0] <= 0:
                    break
                i = i - 0.003921568627
            
            # Costes' threshold for the entire image is applied to each object
            fi_above_thr = first_pixels > thr_fi_c
            si_above_thr = second_pixels > thr_si_c
            combined_thresh_c = fi_above_thr & si_above_thr
            fi_thresh_c = first_pixels[combined_thresh_c]
            si_thresh_c = second_pixels[combined_thresh_c]
            if np.any(fi_above_thr):
                tot_fi_thr_c = scind.sum(first_pixels[first_pixels > thr_fi_c],labels[first_pixels > thr_fi_c],lrange)
            else:
                tot_fi_thr_c = np.zeros(len(lrange))
            if np.any(si_above_thr):
                tot_si_thr_c = scind.sum(second_pixels[second_pixels > thr_si_c],labels[second_pixels > thr_si_c],lrange)
            else:
                tot_si_thr_c = np.zeros(len(lrange))
            
            # Manders Coefficient
            M1 = np.zeros(len(lrange))
            M2 = np.zeros(len(lrange))
            
            if np.any(combined_thresh):
                M1 = np.array(scind.sum(fi_thresh,labels[combined_thresh],lrange)) / np.array(tot_fi_thr)
                M2 = np.array(scind.sum(si_thresh,labels[combined_thresh],lrange)) / np.array(tot_si_thr)
            result += [[first_image_name, second_image_name, object_name,"Mean Manders coeff","%.3f"%np.mean(M1)],
                       [first_image_name, second_image_name, object_name,"Median Manders coeff","%.3f"%np.median(M1)],
                       [first_image_name, second_image_name, object_name,"Min Manders coeff","%.3f"%np.min(M1)],
                       [first_image_name, second_image_name, object_name,"Max Manders coeff","%.3f"%np.max(M1)]]
            result += [[second_image_name, first_image_name, object_name,"Mean Manders coeff","%.3f"%np.mean(M2)],
                       [second_image_name, first_image_name, object_name,"Median Manders coeff","%.3f"%np.median(M2)],
                       [second_image_name, first_image_name, object_name,"Min Manders coeff","%.3f"%np.min(M2)],
                       [second_image_name, first_image_name, object_name,"Max Manders coeff","%.3f"%np.max(M2)]]
            
            # RWC Coefficient
            RWC1 = np.zeros(len(lrange))
            RWC2 = np.zeros(len(lrange))
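            #
            # Rank the pixels of each channel within the pooled object data
            # (ties share a rank), then weight every pixel by
            # (R - |rank1 - rank2|) / R, where R is the largest rank + 1.
            # Pixels whose intensities rank similarly in both channels get a
            # weight near 1; strongly discordant pixels get a weight near 0.
            #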
            [Rank1] = np.lexsort(([labels], [first_pixels]))
            [Rank2] = np.lexsort(([labels], [second_pixels]))
            Rank1_U = np.hstack([[False],first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]])
            Rank2_U = np.hstack([[False],second_pixels[Rank2[:-1]] != second_pixels[Rank2[1:]]])
            Rank1_S = np.cumsum(Rank1_U)
            Rank2_S = np.cumsum(Rank2_U)
            Rank_im1 = np.zeros(first_pixels.shape, dtype=int)
            Rank_im2 = np.zeros(second_pixels.shape, dtype=int)
            Rank_im1[Rank1] = Rank1_S
            Rank_im2[Rank2] = Rank2_S
            
            R = max(Rank_im1.max(), Rank_im2.max()) + 1
            Di = abs(Rank_im1 - Rank_im2)
            weight = (R-Di) * 1.0 / R
            weight_thresh = weight[combined_thresh]
            if np.any(combined_thresh):
                RWC1 = np.array(scind.sum(fi_thresh * weight_thresh,labels[combined_thresh],lrange)) / np.array(tot_fi_thr)
                RWC2 = np.array(scind.sum(si_thresh * weight_thresh,labels[combined_thresh],lrange)) / np.array(tot_si_thr)
                
            result += [[first_image_name, second_image_name, object_name,"Mean RWC coeff","%.3f"%np.mean(RWC1)],
                       [first_image_name, second_image_name, object_name,"Median RWC coeff","%.3f"%np.median(RWC1)],
                       [first_image_name, second_image_name, object_name,"Min RWC coeff","%.3f"%np.min(RWC1)],
                       [first_image_name, second_image_name, object_name,"Max RWC coeff","%.3f"%np.max(RWC1)]]
            result += [[second_image_name, first_image_name, object_name,"Mean RWC coeff","%.3f"%np.mean(RWC2)],
                       [second_image_name, first_image_name, object_name,"Median RWC coeff","%.3f"%np.median(RWC2)],
                       [second_image_name, first_image_name, object_name,"Min RWC coeff","%.3f"%np.min(RWC2)],
                       [second_image_name, first_image_name, object_name,"Max RWC coeff","%.3f"%np.max(RWC2)]]
            
            
            #Costes Automated Threshold
            C1 = np.zeros(len(lrange))
            C2 = np.zeros(len(lrange))
            if np.any(combined_thresh_c):
                C1 = np.array(scind.sum(fi_thresh_c,labels[combined_thresh_c],lrange)) / np.array(tot_fi_thr_c)
                C2 = np.array(scind.sum(si_thresh_c,labels[combined_thresh_c],lrange)) / np.array(tot_si_thr_c)
            result += [[first_image_name, second_image_name, object_name,"Mean Manders coeff (Costes)","%.3f"%np.mean(C1)],
                       [first_image_name, second_image_name, object_name,"Median Manders coeff (Costes)","%.3f"%np.median(C1)],
                       [first_image_name, second_image_name, object_name,"Min Manders coeff (Costes)","%.3f"%np.min(C1)],
                       [first_image_name, second_image_name, object_name,"Max Manders coeff (Costes)","%.3f"%np.max(C1)]
                       ]
            result += [[second_image_name, first_image_name, object_name,"Mean Manders coeff (Costes)","%.3f"%np.mean(C2)],
                       [second_image_name, first_image_name, object_name,"Median Manders coeff (Costes)","%.3f"%np.median(C2)],
                       [second_image_name, first_image_name, object_name,"Min Manders coeff (Costes)","%.3f"%np.min(C2)],
                       [second_image_name, first_image_name, object_name,"Max Manders coeff (Costes)","%.3f"%np.max(C2)]
                       ]
            
            
            # Overlap Coefficient
            fpsq = scind.sum(first_pixels[combined_thresh]**2,labels[combined_thresh],lrange)
            spsq = scind.sum(second_pixels[combined_thresh]**2,labels[combined_thresh],lrange)
            pdt = np.sqrt(np.array(fpsq) * np.array(spsq))
            
            if np.any(combined_thresh):
                overlap = fix(scind.sum(first_pixels[combined_thresh] * second_pixels[combined_thresh], labels[combined_thresh], lrange) / pdt)
                K1 = fix((scind.sum(first_pixels[combined_thresh] * second_pixels[combined_thresh], labels[combined_thresh], lrange)) / (np.array(fpsq)))
                K2 = fix(scind.sum(first_pixels[combined_thresh] * second_pixels[combined_thresh], labels[combined_thresh], lrange) / np.array(spsq))
            else:
                overlap = K1 = K2 = np.zeros(len(lrange))
            result += [[first_image_name, second_image_name, object_name,"Mean Overlap coeff","%.3f"%np.mean(overlap)],
                       [first_image_name, second_image_name, object_name,"Median Overlap coeff","%.3f"%np.median(overlap)],
                       [first_image_name, second_image_name, object_name,"Min Overlap coeff","%.3f"%np.min(overlap)],
                       [first_image_name, second_image_name, object_name,"Max Overlap coeff","%.3f"%np.max(overlap)]]

        measurement = ("Correlation_Correlation_%s_%s" %
                       (first_image_name, second_image_name))
        overlap_measurement = (F_OVERLAP_FORMAT%(first_image_name,
                                                 second_image_name))
        k_measurement_1 = (F_K_FORMAT%(first_image_name,
                                       second_image_name))
        k_measurement_2 = (F_K_FORMAT%(second_image_name,
                                       first_image_name))
        manders_measurement_1 = (F_MANDERS_FORMAT%(first_image_name,
                                                   second_image_name))
        manders_measurement_2 = (F_MANDERS_FORMAT%(second_image_name,
                                                   first_image_name))
        rwc_measurement_1 = (F_RWC_FORMAT%(first_image_name,
                                           second_image_name))
        rwc_measurement_2 = (F_RWC_FORMAT%(second_image_name,
                                           first_image_name))
        costes_measurement_1 = (F_COSTES_FORMAT%(first_image_name,
                                                 second_image_name))
        costes_measurement_2 = (F_COSTES_FORMAT%(second_image_name,
                                                 first_image_name))
        
        workspace.measurements.add_measurement(object_name, measurement, corr)
        workspace.measurements.add_measurement(object_name, overlap_measurement, overlap)
        workspace.measurements.add_measurement(object_name, k_measurement_1, K1)
        workspace.measurements.add_measurement(object_name, k_measurement_2, K2)
        workspace.measurements.add_measurement(object_name, manders_measurement_1, M1)
        workspace.measurements.add_measurement(object_name, manders_measurement_2, M2)
        workspace.measurements.add_measurement(object_name, rwc_measurement_1, RWC1)
        workspace.measurements.add_measurement(object_name, rwc_measurement_2, RWC2)
        workspace.measurements.add_measurement(object_name, costes_measurement_1, C1)
        workspace.measurements.add_measurement(object_name, costes_measurement_2, C2)
        
        if n_objects == 0:
            return [[first_image_name, second_image_name, object_name,
                     "Mean correlation","-"],
                    [first_image_name, second_image_name, object_name,
                     "Median correlation","-"],
                    [first_image_name, second_image_name, object_name,
                     "Min correlation","-"],
                    [first_image_name, second_image_name, object_name,
                     "Max correlation","-"]]
        else:
            return result
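
The per-object Pearson coefficient above is built from scipy.ndimage sums grouped by object label. The following self-contained sketch (toy data, not part of the module above) reproduces that per-label computation and cross-checks it against np.corrcoef:

import numpy as np
import scipy.ndimage as scind

# Toy flattened data: two "objects" (labels 1 and 2) with five pixels each.
labels = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
first_pixels = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.9, 0.7, 0.5, 0.3, 0.1])
second_pixels = np.array([0.2, 0.3, 0.5, 0.6, 0.9, 0.1, 0.2, 0.4, 0.5, 0.8])

lrange = np.arange(2, dtype=np.int32) + 1
mean1 = np.array(scind.mean(first_pixels, labels, lrange))
mean2 = np.array(scind.mean(second_pixels, labels, lrange))
# Root sum of squared deviations per label (i.e. std * sqrt(n)).
std1 = np.sqrt(scind.sum((first_pixels - mean1[labels - 1]) ** 2, labels, lrange))
std2 = np.sqrt(scind.sum((second_pixels - mean2[labels - 1]) ** 2, labels, lrange))
x = first_pixels - mean1[labels - 1]
y = second_pixels - mean2[labels - 1]
corr = np.array(scind.sum(x * y / (std1[labels - 1] * std2[labels - 1]),
                          labels, lrange))

# Cross-check against numpy's Pearson correlation for each label.
for k in lrange:
    expected = np.corrcoef(first_pixels[labels == k],
                           second_pixels[labels == k])[0, 1]
    assert np.isclose(corr[k - 1], expected)
print(corr)

The square root of the grouped sum of squared deviations plays the role of each object's standard deviation, so no explicit loop over objects is needed.
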
Exemplo n.º 38
0
    def make_neuron_graph(self, skeleton, skeleton_labels,
                          trunks, branchpoints, endpoints, image):
        '''Make a table that captures the graph relationship of the skeleton

        skeleton - binary skeleton image + outline of seed objects
        skeleton_labels - labels matrix of skeleton
        trunks - binary image with trunk points as 1
        branchpoints - binary image with branchpoints as 1
        endpoints - binary image with endpoints as 1
        image - image for intensity measurement

        Returns two tables.
        Table 1: edge table
        The edge table is a numpy record array with the following named
        columns in the following order:
        v1: index into vertex table of first vertex of edge
        v2: index into vertex table of second vertex of edge
        length: # of intermediate pixels + 2 (for two vertices)
        total_intensity: sum of intensities along the edge

        Table 2: vertex table
        The vertex table is a numpy record array:
        i: I coordinate of the vertex
        j: J coordinate of the vertex
        label: the vertex's label
        kind: kind of vertex = "T" for trunk, "B" for branchpoint or "E" for endpoint.
        '''
        i, j = np.mgrid[0:skeleton.shape[0], 0:skeleton.shape[1]]
        #
        # Give each point of interest a unique number
        #
        points_of_interest = trunks | branchpoints | endpoints
        number_of_points = np.sum(points_of_interest)
        #
        # Make up the vertex table
        #
        tbe = np.zeros(points_of_interest.shape, '|S1')
        tbe[trunks] = 'T'
        tbe[branchpoints] = 'B'
        tbe[endpoints] = 'E'
        i_idx = i[points_of_interest]
        j_idx = j[points_of_interest]
        poe_labels = skeleton_labels[points_of_interest]
        tbe = tbe[points_of_interest]
        vertex_table = {
            self.VF_I: i_idx,
            self.VF_J: j_idx,
            self.VF_LABELS: poe_labels,
            self.VF_KIND: tbe}
        #
        # First, break the skeleton by removing the branchpoints, endpoints
        # and trunks
        #
        broken_skeleton = skeleton & (~points_of_interest)
        #
        # Label the broken skeleton: this labels each edge differently
        #
        edge_labels, nlabels = morph.label_skeleton(skeleton)
        #
        # Reindex after removing the points of interest
        #
        edge_labels[points_of_interest] = 0
        if nlabels > 0:
            indexer = np.arange(nlabels + 1)
            unique_labels = np.sort(np.unique(edge_labels))
            nlabels = len(unique_labels) - 1
            indexer[unique_labels] = np.arange(len(unique_labels))
            edge_labels = indexer[edge_labels]
            #
            # find magnitudes and lengths for all edges
            #
            magnitudes = fix(scind.sum(image, edge_labels, np.arange(1, nlabels + 1, dtype=np.int32)))
            lengths = fix(scind.sum(np.ones(edge_labels.shape),
                                    edge_labels, np.arange(1, nlabels + 1, dtype=np.int32))).astype(int)
        else:
            magnitudes = np.zeros(0)
            lengths = np.zeros(0, int)
        #
        # combine the edge labels and indexes of points of interest with padding
        #
        edge_mask = edge_labels != 0
        all_labels = np.zeros(np.array(edge_labels.shape) + 2, int)
        all_labels[1:-1, 1:-1][edge_mask] = edge_labels[edge_mask] + number_of_points
        all_labels[i_idx + 1, j_idx + 1] = np.arange(1, number_of_points + 1)
        #
        # Collect all 8 neighbors for each point of interest
        #
        p1 = np.zeros(0, int)
        p2 = np.zeros(0, int)
        for i_off, j_off in ((0, 0), (0, 1), (0, 2),
                             (1, 0), (1, 2),
                             (2, 0), (2, 1), (2, 2)):
            p1 = np.hstack((p1, np.arange(1, number_of_points + 1)))
            p2 = np.hstack((p2, all_labels[i_idx + i_off, j_idx + j_off]))
        #
        # Get rid of zeros which are background
        #
        p1 = p1[p2 != 0]
        p2 = p2[p2 != 0]
        #
        # Find point_of_interest -> point_of_interest connections.
        #
        p1_poi = p1[(p2 <= number_of_points) & (p1 < p2)]
        p2_poi = p2[(p2 <= number_of_points) & (p1 < p2)]
        #
        # Make sure matches are labeled the same
        #
        same_labels = (skeleton_labels[i_idx[p1_poi - 1], j_idx[p1_poi - 1]] ==
                       skeleton_labels[i_idx[p2_poi - 1], j_idx[p2_poi - 1]])
        p1_poi = p1_poi[same_labels]
        p2_poi = p2_poi[same_labels]
        #
        # Find point_of_interest -> edge
        #
        p1_edge = p1[p2 > number_of_points]
        edge = p2[p2 > number_of_points]
        #
        # Each distinct value in "edge" groups the points of interest
        # (p1_edge) that touch that edge; pairwise_permutations below expands
        # each group into all pairs of points connected through the edge.
        # A pair may also touch directly without the edge, but the sort and
        # de-duplication below keep only the shortest connection per pair.
        #
        edge, p1_edge, p2_edge = morph.pairwise_permutations(edge, p1_edge)
        indexer = edge - number_of_points - 1
        lengths = lengths[indexer]
        magnitudes = magnitudes[indexer]
        #
        # OK, now we make the edge table. First poi<->poi. Length = 2,
        # magnitude = magnitude at each point
        #
        poi_length = np.ones(len(p1_poi)) * 2
        poi_magnitude = (image[i_idx[p1_poi - 1], j_idx[p1_poi - 1]] +
                         image[i_idx[p2_poi - 1], j_idx[p2_poi - 1]])
        #
        # Now the edges...
        #
        poi_edge_length = lengths + 2
        poi_edge_magnitude = (image[i_idx[p1_edge - 1], j_idx[p1_edge - 1]] +
                              image[i_idx[p2_edge - 1], j_idx[p2_edge - 1]] +
                              magnitudes)
        #
        # Put together the columns
        #
        v1 = np.hstack((p1_poi, p1_edge))
        v2 = np.hstack((p2_poi, p2_edge))
        lengths = np.hstack((poi_length, poi_edge_length))
        magnitudes = np.hstack((poi_magnitude, poi_edge_magnitude))
        #
        # Sort by p1, p2 and length in order to pick the shortest length
        #
        indexer = np.lexsort((lengths, v1, v2))
        v1 = v1[indexer]
        v2 = v2[indexer]
        lengths = lengths[indexer]
        magnitudes = magnitudes[indexer]
        if len(v1) > 0:
            to_keep = np.hstack(([True],
                                 (v1[1:] != v1[:-1]) |
                                 (v2[1:] != v2[:-1])))
            v1 = v1[to_keep]
            v2 = v2[to_keep]
            lengths = lengths[to_keep]
            magnitudes = magnitudes[to_keep]
        #
        # Put it all together into a table
        #
        edge_table = {
            self.EF_V1: v1,
            self.EF_V2: v2,
            self.EF_LENGTH: lengths,
            self.EF_TOTAL_INTENSITY: magnitudes
        }
        return edge_table, vertex_table
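
The padded 8-neighbor lookup above is the least obvious step of the routine. Below is a minimal, self-contained sketch (toy data, not from the module) of the same trick: pad the label matrix by one pixel, offset edge labels past the point numbers, and gather all eight neighbors of every point of interest with fancy indexing.

import numpy as np

# Toy data: an "edge" labeled 5 joins two points of interest at its two ends.
edge_labels = np.array([[0, 0, 0, 0],
                        [0, 5, 5, 0],
                        [0, 0, 0, 0]], int)
i_idx = np.array([1, 1])          # points of interest at (1, 0) and (1, 3)
j_idx = np.array([0, 3])
number_of_points = len(i_idx)

# Pad by one pixel so neighbor lookups never run off the array, move the edge
# labels past the point numbers, then write the point numbers in place.
all_labels = np.zeros(np.array(edge_labels.shape) + 2, int)
edge_mask = edge_labels != 0
all_labels[1:-1, 1:-1][edge_mask] = edge_labels[edge_mask] + number_of_points
all_labels[i_idx + 1, j_idx + 1] = np.arange(1, number_of_points + 1)

# Collect the labels of all 8 neighbors of every point of interest.
p1 = np.zeros(0, int)
p2 = np.zeros(0, int)
for i_off, j_off in ((0, 0), (0, 1), (0, 2),
                     (1, 0), (1, 2),
                     (2, 0), (2, 1), (2, 2)):
    p1 = np.hstack((p1, np.arange(1, number_of_points + 1)))
    p2 = np.hstack((p2, all_labels[i_idx + i_off, j_idx + j_off]))
p1, p2 = p1[p2 != 0], p2[p2 != 0]
print(sorted(set(zip(p1.tolist(), p2.tolist()))))   # [(1, 7), (2, 7)]: both points touch edge 5 (+2 offset)
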
Exemplo n.º 39
0
 def calculate_minimum_distances(self, workspace, parent_name):
     '''Calculate the distance from child center to parent perimeter'''
     meas = workspace.measurements
     assert isinstance(meas, cpmeas.Measurements)
     sub_object_name = self.sub_object_name.value
     parents = workspace.object_set.get_objects(parent_name)
     children = workspace.object_set.get_objects(sub_object_name)
     parents_of = self.get_parents_of(workspace, parent_name)
     if len(parents_of) == 0:
         dist = np.zeros((0, ))
     elif np.all(parents_of == 0):
         dist = np.array([np.NaN] * len(parents_of))
     else:
         mask = parents_of > 0
         ccenters = centers_of_labels(children.segmented).transpose()
         ccenters = ccenters[mask, :]
         parents_of_masked = parents_of[mask] - 1
         pperim = outline(parents.segmented)
         #
         # Get a list of all points on the perimeter
         #
         perim_loc = np.argwhere(pperim != 0)
         #
         # Get the label # for each point
         #
         perim_idx = pperim[perim_loc[:, 0], perim_loc[:, 1]]
         #
         # Sort the points by label #
         #
         idx = np.lexsort((perim_loc[:, 1], perim_loc[:, 0], perim_idx))
         perim_loc = perim_loc[idx, :]
         perim_idx = perim_idx[idx]
         #
         # Get counts and indexes to each run of perimeter points
         #
         counts = fix(
             scind.sum(np.ones(len(perim_idx)), perim_idx,
                       np.arange(1, perim_idx[-1] + 1))).astype(np.int32)
         indexes = np.cumsum(counts) - counts
         #
         # For the children, get the index and count of the parent
         #
         ccounts = counts[parents_of_masked]
         cindexes = indexes[parents_of_masked]
         #
         # Now make an array that has an element for each of that child's
         # perimeter points
         #
         clabel = np.zeros(np.sum(ccounts), int)
         #
         # cfirst is the eventual first index of each child in the
         # clabel array
         #
         cfirst = np.cumsum(ccounts) - ccounts
         clabel[cfirst[1:]] += 1
         clabel = np.cumsum(clabel)
         #
         # Make an index that runs from 0 to ccounts for each
         # child label.
         #
         cp_index = np.arange(len(clabel)) - cfirst[clabel]
         #
         # then add cindexes to get an index to the perimeter point
         #
         cp_index += cindexes[clabel]
         #
         # Now, calculate the distance from the centroid of each label
         # to each perimeter point in the parent.
         #
         dist = np.sqrt(
             np.sum((perim_loc[cp_index, :] - ccenters[clabel, :])**2, 1))
         #
         # Finally, find the minimum distance per child
         #
         min_dist = fix(scind.minimum(dist, clabel,
                                      np.arange(len(ccounts))))
         #
         # Account for unparented children
         #
         dist = np.array([np.NaN] * len(mask))
         dist[mask] = min_dist
     meas.add_measurement(sub_object_name, FF_MINIMUM % parent_name, dist)
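
For reference, a brute-force sketch of the quantity computed above, on toy data with plain numpy/scipy (binary erosion stands in for the outline() helper). The vectorized bookkeeping above yields the same minimum centroid-to-perimeter distance for every child without a Python loop.

import numpy as np
import scipy.ndimage as scind

# One parent object (label 1) occupying a 5x5 block, one child centroid inside it.
parent_labels = np.zeros((9, 9), int)
parent_labels[2:7, 2:7] = 1

# Perimeter of the parent: object pixels removed by a binary erosion.
eroded = scind.binary_erosion(parent_labels > 0)
perimeter = (parent_labels > 0) & ~eroded
perim_loc = np.argwhere(perimeter)

child_centroid = np.array([4.0, 5.0])    # (row, col) of the child's center
dists = np.sqrt(np.sum((perim_loc - child_centroid) ** 2, axis=1))
print(dists.min())                       # 1.0: nearest perimeter pixel is (4, 6)
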
Exemplo n.º 40
0
    def run(self, workspace):
        '''Run the module on an image set'''

        object_name = self.object_name.value
        remaining_object_name = self.remaining_objects.value
        original_objects = workspace.object_set.get_objects(object_name)

        if self.mask_choice == MC_IMAGE:
            mask = workspace.image_set.get_image(self.masking_image.value,
                                                 must_be_binary=True)
            mask = mask.pixel_data
        else:
            masking_objects = workspace.object_set.get_objects(
                self.masking_objects.value)
            mask = masking_objects.segmented > 0
        if self.wants_inverted_mask:
            mask = ~mask
        #
        # Load the labels
        #
        labels = original_objects.segmented.copy()
        nobjects = np.max(labels)
        #
        # Resize the mask to cover the objects
        #
        mask, m1 = cpo.size_similarly(labels, mask)
        mask[~m1] = False
        #
        # Apply the mask according to the overlap choice.
        #
        if nobjects == 0:
            pass
        elif self.overlap_choice == P_MASK:
            labels = labels * mask
        else:
            pixel_counts = fix(
                scind.sum(mask, labels,
                          np.arange(1, nobjects + 1, dtype=np.int32)))
            if self.overlap_choice == P_KEEP:
                keep = pixel_counts > 0
            else:
                total_pixels = fix(
                    scind.sum(np.ones(labels.shape), labels,
                              np.arange(1, nobjects + 1, dtype=np.int32)))
                if self.overlap_choice == P_REMOVE:
                    keep = pixel_counts == total_pixels
                elif self.overlap_choice == P_REMOVE_PERCENTAGE:
                    fraction = self.overlap_fraction.value
                    keep = pixel_counts / total_pixels >= fraction
                else:
                    raise NotImplementedError(
                        "Unknown overlap-handling choice: %s" %
                        self.overlap_choice.value)
            keep = np.hstack(([False], keep))
            labels[~keep[labels]] = 0
        #
        # Renumber the labels matrix if requested
        #
        if self.retain_or_renumber == R_RENUMBER:
            unique_labels = np.unique(labels[labels != 0])
            indexer = np.zeros(nobjects + 1, int)
            indexer[unique_labels] = np.arange(1, len(unique_labels) + 1)
            labels = indexer[labels]
            parent_objects = unique_labels
        else:
            parent_objects = np.arange(1, nobjects + 1)
        #
        # Add the objects
        #
        remaining_objects = cpo.Objects()
        remaining_objects.segmented = labels
        remaining_objects.unedited_segmented = original_objects.unedited_segmented
        workspace.object_set.add_objects(remaining_objects,
                                         remaining_object_name)
        #
        # Add measurements
        #
        m = workspace.measurements
        m.add_measurement(remaining_object_name,
                          cellprofiler.measurement.FF_PARENT % object_name,
                          parent_objects)
        if np.max(original_objects.segmented) == 0:
            child_count = np.array([], int)
        else:
            child_count = fix(
                scind.sum(labels, original_objects.segmented,
                          np.arange(1, nobjects + 1, dtype=np.int32)))
            child_count = (child_count > 0).astype(int)
        m.add_measurement(
            object_name,
            cellprofiler.measurement.FF_CHILDREN_COUNT % remaining_object_name,
            child_count)
        if self.retain_or_renumber == R_RETAIN:
            remaining_object_count = nobjects
        else:
            remaining_object_count = len(unique_labels)
        I.add_object_count_measurements(m, remaining_object_name,
                                        remaining_object_count)
        I.add_object_location_measurements(m, remaining_object_name, labels)
        #
        # Save the input, mask and output images for display
        #
        if self.show_window:
            workspace.display_data.original_labels = original_objects.segmented
            workspace.display_data.final_labels = labels
            workspace.display_data.mask = mask
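
A small sketch (toy data, not from the module) of the per-object overlap test used above for the percentage branch: count each object's pixels inside the mask with a grouped scipy.ndimage sum and keep objects whose in-mask fraction reaches the threshold.

import numpy as np
import scipy.ndimage as scind

labels = np.array([[1, 1, 0, 2, 2],
                   [1, 1, 0, 2, 2]])
mask = np.array([[True, True, False, True, False],
                 [True, False, False, False, False]])
nobjects = labels.max()
index = np.arange(1, nobjects + 1, dtype=np.int32)

pixel_counts = np.array(scind.sum(mask, labels, index))                 # in-mask pixels per object
total_pixels = np.array(scind.sum(np.ones(labels.shape), labels, index))
keep = pixel_counts / total_pixels >= 0.5                               # fraction threshold = 0.5

# Zero out the objects that fail the test, exactly as in the module above.
keep = np.hstack(([False], keep))
labels[~keep[labels]] = 0
print(labels)       # object 1 (3/4 in mask) survives, object 2 (1/4) is removed
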
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image_name = self.image_name.value
        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale = True)
        workspace.display_data.statistics = []
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        else:
            thresholded_image = self.threshold_image(image_name, workspace)
            has_threshold = True

        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0,:], labels_in[-1,:], labels_in[:,0], labels_in[:,-1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in)+1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]

        labels_in[(~ is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp

        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances,(i,j) = scind.distance_transform_edt(labels_in == 0,
                                                               return_indices = True)
                labels_out = np.zeros(labels_in.shape,int)
                dilate_mask = distances <= self.distance_to_dilate.value
                labels_out[dilate_mask] =\
                    labels_in[i[dilate_mask],j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in,
                                                  thresholded_image,
                                                  1.0)
                labels_out[distances>self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in>0]
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            #
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in,
                                             thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image,
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1-img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img,
                                   labels_in,
                                   np.ones((3,3),bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                                objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out,
                                   objects.segmented,
                                   range(np.max(objects.segmented)+1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image = image)
                workspace.image_set.add(self.new_primary_outlines_name.value,
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out)

        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image = image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add measurements
        #
        measurements = workspace.measurements
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT%objname,
                                     children_per_parent)
        measurements.add_measurement(objname,
                                     cpmi.FF_PARENT%self.primary_objects.value,
                                     parents_of_children)
        image_numbers = np.ones(len(parents_of_children), int) *\
            measurements.image_set_number
        mask = parents_of_children > 0
        measurements.add_relate_measurement(
            self.module_num, R_PARENT,
            self.primary_objects.value, self.objects_name.value,
            image_numbers[mask], parents_of_children[mask],
            image_numbers[mask],
            np.arange(1, len(parents_of_children) + 1)[mask])
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(new_objects,
                                             self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(measurements,
                                               self.new_primary_objects_name.value,
                                               np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(measurements,
                                                  self.new_primary_objects_name.value,
                                                  new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value,
                 new_objects, self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value,
                 objects_out, objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(parent_name,
                                             cpmi.FF_CHILDREN_COUNT%child_name,
                                             children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT%parent_name,
                                             parents_of_children)
        if self.show_window:
            object_area = np.sum(segmented_out > 0)
            workspace.display_data.object_pct = \
                100 * object_area / np.product(segmented_out.shape)
            workspace.display_data.img = img
            workspace.display_data.segmented_out = segmented_out
            workspace.display_data.primary_labels = objects.segmented
            workspace.display_data.global_threshold = global_threshold
            workspace.display_data.object_count = object_count
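
A minimal sketch (toy seeds, not from the module) of the M_DISTANCE_N branch above: scipy's Euclidean distance transform with return_indices=True reports, for every background pixel, the coordinates of the nearest seed, so growing the labels by a fixed distance becomes a single fancy-indexing assignment.

import numpy as np
import scipy.ndimage as scind

labels_in = np.zeros((7, 7), int)
labels_in[1, 1] = 1                       # two seed objects
labels_in[5, 5] = 2
distance_to_dilate = 2

distances, (i, j) = scind.distance_transform_edt(labels_in == 0,
                                                 return_indices=True)
labels_out = np.zeros(labels_in.shape, int)
dilate_mask = distances <= distance_to_dilate
labels_out[dilate_mask] = labels_in[i[dilate_mask], j[dilate_mask]]
print(labels_out)                         # each seed grown into a radius-2 disc
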
    def run(self, workspace):
        '''Run the algorithm on one image set'''
        #
        # Get the image as a binary image
        #
        image_set = workspace.image_set
        image = image_set.get_image(self.image_name.value,
                                    must_be_binary = True)
        mask = image.pixel_data
        if image.has_mask:
            mask = mask & image.mask
        angle_count = self.angle_count.value
        #
        # We collect the i,j and angle of pairs of points that
        # are 3-d adjacent after erosion.
        #
        # i - the i coordinate of each point found after erosion
        # j - the j coordinate of each point found after erosion
        # a - the angle of the structuring element for each point found
        #
        i = np.zeros(0, int)
        j = np.zeros(0, int)
        a = np.zeros(0, int)

        ig, jg = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
        this_idx = 0
        for angle_number in range(angle_count):
            angle = float(angle_number) * np.pi / float(angle_count)
            strel = self.get_diamond(angle)
            erosion = binary_erosion(mask, strel)
            #
            # Accumulate the count, i, j and angle for all foreground points
            # in the erosion
            #
            this_count = np.sum(erosion)
            i = np.hstack((i, ig[erosion]))
            j = np.hstack((j, jg[erosion]))
            a = np.hstack((a, np.ones(this_count, float) * angle))
        #
        # Find connections based on distances, not adjacency
        #
        first, second = self.find_adjacent_by_distance(i,j,a)
        #
        # Do all connected components.
        #
        if len(first) > 0:
            ij_labels = all_connected_components(first, second) + 1
            nlabels = np.max(ij_labels)
            label_indexes = np.arange(1, nlabels + 1)
            #
            # Compute the measurements
            #
            center_x = fix(mean_of_labels(j, ij_labels, label_indexes))
            center_y = fix(mean_of_labels(i, ij_labels, label_indexes))
            #
            # The angles are weirdly complicated because of the wrap-around.
            # You can imagine some horrible cases, like a circular patch of
            # "worm" in which all angles are represented or a gentle "U"
            # curve.
            #
            # For now, I'm going to use the following heuristic:
            #
            # Compute two different "angles". The angles of one go
            # from 0 to 180 and the angles of the other go from -90 to 90.
            # Take the variance of these from the mean and
            # choose the representation with the lowest variance.
            #
            # An alternative would be to compute the variance at each possible
            # dividing point. Another alternative would be to actually trace through
            # the connected components - both overkill for such an inconsequential
            # measurement I hope.
            #
            angles = fix(mean_of_labels(a, ij_labels, label_indexes))
            vangles = fix(mean_of_labels((a - angles[ij_labels-1])**2,
                                         ij_labels, label_indexes))
            aa = a.copy()
            aa[a > np.pi / 2] -= np.pi
            aangles = fix(mean_of_labels(aa, ij_labels, label_indexes))
            vaangles = fix(mean_of_labels((aa-aangles[ij_labels-1])**2,
                                          ij_labels, label_indexes))
            aangles[aangles < 0] += np.pi
            angles[vaangles < vangles] = aangles[vaangles < vangles]
            #
            # Squish the labels to 2-d. The labels for overlaps are arbitrary.
            #
            labels = np.zeros(mask.shape, int)
            labels[i, j] = ij_labels
        else:
            center_x = np.zeros(0, int)
            center_y = np.zeros(0, int)
            angles = np.zeros(0)
            nlabels = 0
            label_indexes = np.zeros(0, int)
            labels = np.zeros(mask.shape, int)

        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        object_name = self.object_name.value
        m.add_measurement(object_name, I.M_LOCATION_CENTER_X, center_x)
        m.add_measurement(object_name, I.M_LOCATION_CENTER_Y, center_y)
        m.add_measurement(object_name, M_ANGLE, angles * 180 / np.pi)
        m.add_measurement(object_name, I.M_NUMBER_OBJECT_NUMBER, label_indexes)
        m.add_image_measurement(I.FF_COUNT % object_name, nlabels)
        #
        # Make the objects
        #
        object_set = workspace.object_set
        assert isinstance(object_set, cpo.ObjectSet)
        objects = cpo.Objects()
        objects.segmented = labels
        objects.parent_image = image
        object_set.add_objects(objects, object_name)
        if self.show_window:
            workspace.display_data.i = center_y
            workspace.display_data.j = center_x
            workspace.display_data.angle = angles
            workspace.display_data.mask = mask
            workspace.display_data.labels = labels
            workspace.display_data.count = nlabels
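
A toy sketch (not from the module) of the wrap-around heuristic described in the comments above: average the angles both on [0, 180) and on (-90, 90], and keep whichever representation has the smaller variance.

import numpy as np

a = np.radians(np.array([174.0, 178.0, 2.0, 4.0]))   # angles straddling the 0/180 wrap

angles = a.mean()                         # naive mean is ~89.5 degrees, clearly wrong
vangles = ((a - angles) ** 2).mean()

aa = a.copy()
aa[a > np.pi / 2] -= np.pi                # re-express the same angles on (-90, 90]
aangles = aa.mean()
vaangles = ((aa - aangles) ** 2).mean()
if aangles < 0:
    aangles += np.pi

best = aangles if vaangles < vangles else angles
print(np.degrees(best))                   # ~179.5 degrees, the sensible average
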
    def run(self, workspace):
        assert isinstance(workspace, cpw.Workspace)
        image_name = self.image_name.value
        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale=True)
        workspace.display_data.statistics = []
        img = image.pixel_data
        mask = image.mask
        objects = workspace.object_set.get_objects(self.primary_objects.value)
        global_threshold = None
        if self.method == M_DISTANCE_N:
            has_threshold = False
        else:
            thresholded_image = self.threshold_image(image_name, workspace)
            has_threshold = True

        #
        # Get the following labels:
        # * all edited labels
        # * labels touching the edge, including small removed
        #
        labels_in = objects.unedited_segmented.copy()
        labels_touching_edge = np.hstack(
            (labels_in[0, :], labels_in[-1, :], labels_in[:,
                                                          0], labels_in[:,
                                                                        -1]))
        labels_touching_edge = np.unique(labels_touching_edge)
        is_touching = np.zeros(np.max(labels_in) + 1, bool)
        is_touching[labels_touching_edge] = True
        is_touching = is_touching[labels_in]

        labels_in[(~is_touching) & (objects.segmented == 0)] = 0
        #
        # Stretch the input labels to match the image size. If there's no
        # label matrix, then there's no label in that area.
        #
        if tuple(labels_in.shape) != tuple(img.shape):
            tmp = np.zeros(img.shape, labels_in.dtype)
            i_max = min(img.shape[0], labels_in.shape[0])
            j_max = min(img.shape[1], labels_in.shape[1])
            tmp[:i_max, :j_max] = labels_in[:i_max, :j_max]
            labels_in = tmp

        if self.method in (M_DISTANCE_B, M_DISTANCE_N):
            if self.method == M_DISTANCE_N:
                distances, (i, j) = scind.distance_transform_edt(
                    labels_in == 0, return_indices=True)
                labels_out = np.zeros(labels_in.shape, int)
                dilate_mask = distances <= self.distance_to_dilate.value
                labels_out[dilate_mask] =\
                    labels_in[i[dilate_mask],j[dilate_mask]]
            else:
                labels_out, distances = propagate(img, labels_in,
                                                  thresholded_image, 1.0)
                labels_out[distances > self.distance_to_dilate.value] = 0
                labels_out[labels_in > 0] = labels_in[labels_in > 0]
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            #
            # Create the final output labels by removing labels in the
            # output matrix that are missing from the segmented image
            #
            segmented_labels = objects.segmented
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_PROPAGATION:
            labels_out, distance = propagate(img, labels_in, thresholded_image,
                                             self.regularization_factor.value)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_G:
            #
            # First, apply the sobel filter to the image (both horizontal
            # and vertical). The filter measures gradient.
            #
            sobel_image = np.abs(scind.sobel(img))
            #
            # Combine the image mask and threshold to mask the watershed
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the first watershed
            #
            labels_out = watershed(sobel_image,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out.copy()
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)
        elif self.method == M_WATERSHED_I:
            #
            # invert the image so that the maxima are filled first
            # and the cells compete over what's close to the threshold
            #
            inverted_img = 1 - img
            #
            # Same as above, but perform the watershed on the original image
            #
            watershed_mask = np.logical_or(thresholded_image, labels_in > 0)
            watershed_mask = np.logical_and(watershed_mask, mask)
            #
            # Perform the watershed
            #
            labels_out = watershed(inverted_img,
                                   labels_in,
                                   np.ones((3, 3), bool),
                                   mask=watershed_mask)
            if self.fill_holes:
                small_removed_segmented_out = fill_labeled_holes(labels_out)
            else:
                small_removed_segmented_out = labels_out
            segmented_out = self.filter_labels(small_removed_segmented_out,
                                               objects, workspace)

        if self.wants_discard_edge and self.wants_discard_primary:
            #
            # Make a new primary object
            #
            lookup = scind.maximum(segmented_out, objects.segmented,
                                   range(np.max(objects.segmented) + 1))
            lookup = fix(lookup)
            lookup[0] = 0
            lookup[lookup != 0] = np.arange(np.sum(lookup != 0)) + 1
            segmented_labels = lookup[objects.segmented]
            segmented_out = lookup[segmented_out]
            new_objects = cpo.Objects()
            new_objects.segmented = segmented_labels
            if objects.has_unedited_segmented:
                new_objects.unedited_segmented = objects.unedited_segmented
            if objects.has_small_removed_segmented:
                new_objects.small_removed_segmented = objects.small_removed_segmented
            new_objects.parent_image = objects.parent_image
            primary_outline = outline(segmented_labels)
            if self.wants_primary_outlines:
                out_img = cpi.Image(primary_outline.astype(bool),
                                    parent_image=image)
                workspace.image_set.add(self.new_primary_outlines_name.value,
                                        out_img)
        else:
            primary_outline = outline(objects.segmented)
        secondary_outline = outline(segmented_out)

        #
        # Add the objects to the object set
        #
        objects_out = cpo.Objects()
        objects_out.unedited_segmented = small_removed_segmented_out
        objects_out.small_removed_segmented = small_removed_segmented_out
        objects_out.segmented = segmented_out
        objects_out.parent_image = image
        objname = self.objects_name.value
        workspace.object_set.add_objects(objects_out, objname)
        if self.use_outlines.value:
            out_img = cpi.Image(secondary_outline.astype(bool),
                                parent_image=image)
            workspace.image_set.add(self.outlines_name.value, out_img)
        object_count = np.max(segmented_out)
        #
        # Add measurements
        #
        measurements = workspace.measurements
        cpmi.add_object_count_measurements(measurements, objname, object_count)
        cpmi.add_object_location_measurements(measurements, objname,
                                              segmented_out)
        #
        # Relate the secondary objects to the primary ones and record
        # the relationship.
        #
        children_per_parent, parents_of_children = \
            objects.relate_children(objects_out)
        measurements.add_measurement(self.primary_objects.value,
                                     cpmi.FF_CHILDREN_COUNT % objname,
                                     children_per_parent)
        measurements.add_measurement(
            objname, cpmi.FF_PARENT % self.primary_objects.value,
            parents_of_children)
        image_numbers = np.ones(len(parents_of_children), int) *\
            measurements.image_set_number
        mask = parents_of_children > 0
        measurements.add_relate_measurement(
            self.module_num, R_PARENT, self.primary_objects.value,
            self.objects_name.value, image_numbers[mask],
            parents_of_children[mask], image_numbers[mask],
            np.arange(1,
                      len(parents_of_children) + 1)[mask])
        #
        # If primary objects were created, add them
        #
        if self.wants_discard_edge and self.wants_discard_primary:
            workspace.object_set.add_objects(
                new_objects, self.new_primary_objects_name.value)
            cpmi.add_object_count_measurements(
                measurements, self.new_primary_objects_name.value,
                np.max(new_objects.segmented))
            cpmi.add_object_location_measurements(
                measurements, self.new_primary_objects_name.value,
                new_objects.segmented)
            for parent_objects, parent_name, child_objects, child_name in (
                (objects, self.primary_objects.value, new_objects,
                 self.new_primary_objects_name.value),
                (new_objects, self.new_primary_objects_name.value, objects_out,
                 objname)):
                children_per_parent, parents_of_children = \
                    parent_objects.relate_children(child_objects)
                measurements.add_measurement(
                    parent_name, cpmi.FF_CHILDREN_COUNT % child_name,
                    children_per_parent)
                measurements.add_measurement(child_name,
                                             cpmi.FF_PARENT % parent_name,
                                             parents_of_children)
        if self.show_window:
            object_area = np.sum(segmented_out > 0)
            workspace.display_data.object_pct = \
                100 * object_area / np.product(segmented_out.shape)
            workspace.display_data.img = img
            workspace.display_data.segmented_out = segmented_out
            workspace.display_data.primary_labels = objects.segmented
            workspace.display_data.global_threshold = global_threshold
            workspace.display_data.object_count = object_count
Exemplo n.º 44
0
    def calculate_minimum_distances(self, workspace, parent_name):
        '''Calculate the distance from child center to parent perimeter'''
        meas = workspace.measurements
        assert isinstance(meas, cpmeas.Measurements)
        sub_object_name = self.sub_object_name.value
        parents = workspace.object_set.get_objects(parent_name)
        children = workspace.object_set.get_objects(sub_object_name)
        parents_of = self.get_parents_of(workspace, parent_name)
        if len(parents_of) == 0:
            dist = np.zeros((0,))
        elif np.all(parents_of == 0):
            dist = np.array([np.NaN] * len(parents_of))
        else:
            mask = parents_of > 0
            ccenters = centers_of_labels(children.segmented).transpose()
            ccenters = ccenters[mask, :]
            parents_of_masked = parents_of[mask] - 1
            pperim = outline(parents.segmented)
            #
            # Get a list of all points on the perimeter
            #
            perim_loc = np.argwhere(pperim != 0)
            #
            # Get the label # for each point
            #
            perim_idx = pperim[perim_loc[:, 0], perim_loc[:, 1]]
            #
            # Sort the points by label #
            #
            idx = np.lexsort((perim_loc[:, 1], perim_loc[:, 0], perim_idx))
            perim_loc = perim_loc[idx, :]
            perim_idx = perim_idx[idx]
            #
            # Get counts and indexes to each run of perimeter points
            #
            counts = fix(scind.sum(np.ones(len(perim_idx)), perim_idx,
                                   np.arange(1, perim_idx[-1] + 1))).astype(np.int32)
            indexes = np.cumsum(counts) - counts
            #
            # For the children, get the index and count of the parent
            #
            ccounts = counts[parents_of_masked]
            cindexes = indexes[parents_of_masked]
            #
            # Now make an array that has an element for each of that child's
            # perimeter points
            #
            clabel = np.zeros(np.sum(ccounts), int)
            #
            # cfirst is the eventual first index of each child in the
            # clabel array
            #
            cfirst = np.cumsum(ccounts) - ccounts
            clabel[cfirst[1:]] += 1
            clabel = np.cumsum(clabel)
            #
            # Make an index that runs from 0 to ccounts for each
            # child label.
            #
            cp_index = np.arange(len(clabel)) - cfirst[clabel]
            #
            # then add cindexes to get an index to the perimeter point
            #
            cp_index += cindexes[clabel]
            #
            # Now, calculate the distance from the centroid of each label
            # to each perimeter point in the parent.
            #
            dist = np.sqrt(np.sum((perim_loc[cp_index, :] -
                                   ccenters[clabel, :]) ** 2, 1))
            #
            # Finally, find the minimum distance per child
            #
            min_dist = fix(scind.minimum(dist, clabel, np.arange(len(ccounts))))
            #
            # Account for unparented children
            #
            dist = np.array([np.NaN] * len(mask))
            dist[mask] = min_dist
        meas.add_measurement(sub_object_name, FF_MINIMUM % parent_name, dist)
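
The run-length indexing above boils down to: for every child, take the minimum Euclidean distance from its centroid to the perimeter pixels of its assigned parent. A slower but easier-to-read sketch of the same measurement, using a simple erosion-based outline (which differs slightly from outline() where parents touch) and illustrative names:

# Minimal sketch of the measurement above: per-child minimum distance from the
# child's centroid to its parent's perimeter, computed by brute force.
import numpy as np
from scipy import ndimage

def min_center_to_parent_perimeter(parent_labels, child_centers, parents_of):
    """child_centers: (n, 2) centroids; parents_of: parent label per child (0 = none)."""
    # Perimeter approximated as labeled pixels removed by a binary erosion
    perim = parent_labels * ~ndimage.binary_erosion(parent_labels > 0)
    dist = np.full(len(parents_of), np.nan)
    for child, parent in enumerate(parents_of):
        if parent == 0:
            continue
        pts = np.argwhere(perim == parent)             # perimeter pixels of that parent
        if len(pts):
            dist[child] = np.hypot(*(pts - child_centers[child]).T).min()
    return dist

The resulting dist vector plays the role of the FF_MINIMUM value stored above, with NaN for unparented children.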
    def run(self, workspace):
        '''Run the algorithm on one image set'''
        #
        # Get the image as a binary image
        #
        image_set = workspace.image_set
        image = image_set.get_image(self.image_name.value, must_be_binary=True)
        mask = image.pixel_data
        if image.has_mask:
            mask = mask & image.mask
        angle_count = self.angle_count.value
        #
        # We collect the i,j and angle of pairs of points that
        # are 3-d adjacent after erosion.
        #
        # i - the i coordinate of each point found after erosion
        # j - the j coordinate of each point found after erosion
        # a - the angle of the structuring element for each point found
        #
        i = np.zeros(0, int)
        j = np.zeros(0, int)
        a = np.zeros(0, int)

        ig, jg = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
        #this_idx = 0
        for angle_number in range(angle_count):
            angle = float(angle_number) * np.pi / float(angle_count)
            strel = self.get_diamond(angle)
            erosion = binary_erosion(mask, strel)
            #
            # Accumulate the count, i, j and angle for all foreground points
            # in the erosion
            #
            this_count = np.sum(erosion)
            i = np.hstack((i, ig[erosion]))
            j = np.hstack((j, jg[erosion]))
            a = np.hstack((a, np.ones(this_count, float) * angle))
        #
        # Find connections based on distances, not adjacency
        #
        first, second = self.find_adjacent_by_distance(i, j, a)
        #
        # Do all connected components.
        #
        if len(first) > 0:
            ij_labels = all_connected_components(first, second) + 1
            nlabels = np.max(ij_labels)
            label_indexes = np.arange(1, nlabels + 1)
            #
            # Compute the measurements
            #
            center_x = fix(mean_of_labels(j, ij_labels, label_indexes))
            center_y = fix(mean_of_labels(i, ij_labels, label_indexes))
            #
            # The angles are weirdly complicated because of the wrap-around.
            # You can imagine some horrible cases, like a circular patch of
            # "linear object" in which all angles are represented or a gentle "U"
            # curve.
            #
            # For now, I'm going to use the following heuristic:
            #
            # Compute two different "angles". The angles of one go
            # from 0 to 180 and the angles of the other go from -90 to 90.
            # Take the variance of these from the mean and
            # choose the representation with the lowest variance.
            #
            # An alternative would be to compute the variance at each possible
            # dividing point. Another alternative would be to actually trace through
            # the connected components - both overkill for such an inconsequential
            # measurement I hope.
            #
            angles = fix(mean_of_labels(a, ij_labels, label_indexes))
            vangles = fix(
                mean_of_labels((a - angles[ij_labels - 1])**2, ij_labels,
                               label_indexes))
            aa = a.copy()
            aa[a > np.pi / 2] -= np.pi
            aangles = fix(mean_of_labels(aa, ij_labels, label_indexes))
            vaangles = fix(
                mean_of_labels((aa - aangles[ij_labels - 1])**2, ij_labels,
                               label_indexes))
            aangles[aangles < 0] += np.pi
            angles[vaangles < vangles] = aangles[vaangles < vangles]
        else:
            center_x = np.zeros(0, int)
            center_y = np.zeros(0, int)
            angles = np.zeros(0)
            nlabels = 0
            label_indexes = np.zeros(0, int)
            labels = np.zeros(mask.shape, int)

        ifull = []
        jfull = []
        ij_labelsfull = []
        labeldict = {}

        for label_id in np.unique(label_indexes):
            r = np.array(i) * (ij_labels == label_id)
            r = r[r != 0]
            c = np.array(j) * (ij_labels == label_id)
            c = c[c != 0]
            rect_strel = self.get_rectangle(angles[label_id - 1])
            seedmask = np.zeros_like(mask, int)
            seedmask[r, c] = label_id

            reconstructedlinearobject = skimage.filters.rank.maximum(
                seedmask, rect_strel)
            reconstructedlinearobject = reconstructedlinearobject * mask
            if self.overlap_within_angle == False:
                itemp, jtemp = np.where(reconstructedlinearobject == label_id)
                ifull += list(itemp)
                jfull += list(jtemp)
                ij_labelsfull += [label_id] * len(itemp)

            else:
                itemp, jtemp = np.where(reconstructedlinearobject == label_id)
                labeldict[label_id] = list(zip(itemp, jtemp))

        if self.overlap_within_angle == True:
            angledict = {}
            for eachangle in range(len(angles)):
                angledict[eachangle + 1] = [angles[eachangle]]
            nmerges = 1
            while nmerges != 0:
                nmerges = sum([
                    self.mergeduplicates(firstlabel, secondlabel, labeldict,
                                         angledict)
                    for firstlabel in label_indexes
                    for secondlabel in label_indexes
                    if firstlabel != secondlabel
                ])

            newlabels = sorted(labeldict.keys())
            newangles = sorted(angledict.keys())
            angles = []
            for eachnewlabel in range(len(newlabels)):
                ifull += [
                    int(eachloc[0])
                    for eachloc in labeldict[newlabels[eachnewlabel]]
                ]
                jfull += [
                    int(eachloc[1])
                    for eachloc in labeldict[newlabels[eachnewlabel]]
                ]
                ij_labelsfull += [eachnewlabel + 1] * len(
                    labeldict[newlabels[eachnewlabel]])
                angles.append(np.mean(angledict[newlabels[eachnewlabel]]))
            angles = np.array(angles)

        ijv = np.zeros([len(ifull), 3], dtype=int)
        ijv[:, 0] = ifull
        ijv[:, 1] = jfull
        ijv[:, 2] = ij_labelsfull

        #
        # Make the objects
        #
        object_set = workspace.object_set
        object_name = self.object_name.value
        assert isinstance(object_set, cpo.ObjectSet)
        objects = cpo.Objects()
        objects.ijv = ijv
        objects.parent_image = image
        object_set.add_objects(objects, object_name)
        if self.show_window:
            workspace.display_data.mask = mask
            workspace.display_data.overlapping_labels = [
                l for l, idx in objects.get_labels()
            ]
        if self.overlap_within_angle == True:
            center_x = np.bincount(ijv[:, 2],
                                   ijv[:, 1])[objects.indices] / objects.areas
            center_y = np.bincount(ijv[:, 2],
                                   ijv[:, 0])[objects.indices] / objects.areas

        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        m.add_measurement(object_name, "Location_Center_X", center_x)
        m.add_measurement(object_name, "Location_Center_Y", center_y)
        m.add_measurement(object_name, M_ANGLE, angles * 180 / np.pi)
        m.add_measurement(object_name, "Number_Object_Number", label_indexes)
        m.add_image_measurement("Count_%s" % object_name, nlabels)
    def do_measurements(self, workspace, image_name, object_name,
                        center_object_name, center_choice,
                        bin_count_settings, dd):
        '''Perform the radial measurements on the image set

        workspace - workspace that holds images / objects
        image_name - make measurements on this image
        object_name - make measurements on these objects
        center_object_name - use the centers of these related objects as
                      the centers for radial measurements. None to use the
                      objects themselves.
        center_choice - the user's center choice for this object:
                      C_SELF, C_CENTERS_OF_OBJECTS or C_EDGES_OF_OBJECTS.
        bin_count_settings - the bin count settings group
        d - a dictionary for saving reusable partial results

        returns one statistics tuple per ring.
        '''
        assert isinstance(workspace, cpw.Workspace)
        assert isinstance(workspace.object_set, cpo.ObjectSet)
        bin_count = bin_count_settings.bin_count.value
        wants_scaled = bin_count_settings.wants_scaled.value
        maximum_radius = bin_count_settings.maximum_radius.value

        image = workspace.image_set.get_image(image_name,
                                              must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        labels, pixel_data = cpo.crop_labels_and_image(objects.segmented,
                                                       image.pixel_data)
        nobjects = np.max(objects.segmented)
        measurements = workspace.measurements
        assert isinstance(measurements, cpmeas.Measurements)
        heatmaps = {}
        for heatmap in self.heatmaps:
            if heatmap.object_name.get_objects_name() == object_name and \
                            image_name == heatmap.image_name.get_image_name() and \
                            heatmap.get_number_of_bins() == bin_count:
                dd[id(heatmap)] = \
                    heatmaps[MEASUREMENT_ALIASES[heatmap.measurement.value]] = \
                    np.zeros(labels.shape)
        if nobjects == 0:
            for bin in range(1, bin_count + 1):
                for feature in (F_FRAC_AT_D, F_MEAN_FRAC, F_RADIAL_CV):
                    feature_name = (
                        (feature + FF_GENERIC) % (image_name, bin, bin_count))
                    measurements.add_measurement(
                            object_name, "_".join([M_CATEGORY, feature_name]),
                            np.zeros(0))
                    if not wants_scaled:
                        measurement_name = "_".join([M_CATEGORY, feature,
                                                     image_name, FF_OVERFLOW])
                        measurements.add_measurement(
                                object_name, measurement_name, np.zeros(0))
            return [(image_name, object_name, "no objects", "-", "-", "-", "-")]
        name = (object_name if center_object_name is None
                else "%s_%s" % (object_name, center_object_name))
        if name in dd:
            normalized_distance, i_center, j_center, good_mask = dd[name]
        else:
            d_to_edge = distance_to_edge(labels)
            if center_object_name is not None:
                #
                # Use the center of the centering objects to assign a center
                # to each labeled pixel using propagation
                #
                center_objects = workspace.object_set.get_objects(center_object_name)
                center_labels, cmask = cpo.size_similarly(
                        labels, center_objects.segmented)
                pixel_counts = fix(scind.sum(
                        np.ones(center_labels.shape),
                        center_labels,
                        np.arange(1, np.max(center_labels) + 1, dtype=np.int32)))
                good = pixel_counts > 0
                i, j = (centers_of_labels(center_labels) + .5).astype(int)
                ig = i[good]
                jg = j[good]
                lg = np.arange(1, len(i) + 1)[good]
                if center_choice == C_CENTERS_OF_OTHER:
                    #
                    # Reduce the propagation labels to the centers of
                    # the centering objects
                    #
                    center_labels = np.zeros(center_labels.shape, int)
                    center_labels[ig, jg] = lg
                cl, d_from_center = propagate(np.zeros(center_labels.shape),
                                              center_labels,
                                              labels != 0, 1)
                #
                # Erase the centers that fall outside of labels
                #
                cl[labels == 0] = 0
                #
                # If objects are hollow or crescent-shaped, there may be
                # objects without center labels. As a backup, find the
                # center that is the closest to the center of mass.
                #
                missing_mask = (labels != 0) & (cl == 0)
                missing_labels = np.unique(labels[missing_mask])
                if len(missing_labels):
                    all_centers = centers_of_labels(labels)
                    missing_i_centers, missing_j_centers = \
                        all_centers[:, missing_labels - 1]
                    di = missing_i_centers[:, np.newaxis] - ig[np.newaxis, :]
                    dj = missing_j_centers[:, np.newaxis] - jg[np.newaxis, :]
                    missing_best = lg[np.argsort(di * di + dj * dj)[:, 0]]
                    best = np.zeros(np.max(labels) + 1, int)
                    best[missing_labels] = missing_best
                    cl[missing_mask] = best[labels[missing_mask]]
                    #
                    # Now compute the crow-flies distance to the centers
                    # of these pixels from whatever center was assigned to
                    # the object.
                    #
                    iii, jjj = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
                    di = iii[missing_mask] - i[cl[missing_mask] - 1]
                    dj = jjj[missing_mask] - j[cl[missing_mask] - 1]
                    d_from_center[missing_mask] = np.sqrt(di * di + dj * dj)
            else:
                # Find the point in each object farthest away from the edge.
                # This does better than the centroid:
                # * The center is within the object
                # * The center tends to be an interesting point, like the
                #   center of the nucleus or the center of one or the other
                #   of two touching cells.
                #
                i, j = maximum_position_of_labels(d_to_edge, labels, objects.indices)
                center_labels = np.zeros(labels.shape, int)
                center_labels[i, j] = labels[i, j]
                #
                # Use the coloring trick here to process touching objects
                # in separate operations
                #
                colors = color_labels(labels)
                ncolors = np.max(colors)
                d_from_center = np.zeros(labels.shape)
                cl = np.zeros(labels.shape, int)
                for color in range(1, ncolors + 1):
                    mask = colors == color
                    l, d = propagate(np.zeros(center_labels.shape),
                                     center_labels,
                                     mask, 1)
                    d_from_center[mask] = d[mask]
                    cl[mask] = l[mask]
            good_mask = cl > 0
            if center_choice == C_EDGES_OF_OTHER:
                # Exclude pixels within the centering objects
                # when performing calculations from the centers
                good_mask = good_mask & (center_labels == 0)
            i_center = np.zeros(cl.shape)
            i_center[good_mask] = i[cl[good_mask] - 1]
            j_center = np.zeros(cl.shape)
            j_center[good_mask] = j[cl[good_mask] - 1]

            normalized_distance = np.zeros(labels.shape)
            if wants_scaled:
                total_distance = d_from_center + d_to_edge
                normalized_distance[good_mask] = (d_from_center[good_mask] /
                                                  (total_distance[good_mask] + .001))
            else:
                normalized_distance[good_mask] = \
                    d_from_center[good_mask] / maximum_radius
            dd[name] = [normalized_distance, i_center, j_center, good_mask]
        ngood_pixels = np.sum(good_mask)
        good_labels = labels[good_mask]
        bin_indexes = (normalized_distance * bin_count).astype(int)
        bin_indexes[bin_indexes > bin_count] = bin_count
        labels_and_bins = (good_labels - 1, bin_indexes[good_mask])
        histogram = coo_matrix((pixel_data[good_mask], labels_and_bins),
                               (nobjects, bin_count + 1)).toarray()
        sum_by_object = np.sum(histogram, 1)
        sum_by_object_per_bin = np.dstack([sum_by_object] * (bin_count + 1))[0]
        fraction_at_distance = histogram / sum_by_object_per_bin
        number_at_distance = coo_matrix((np.ones(ngood_pixels), labels_and_bins),
                                        (nobjects, bin_count + 1)).toarray()
        object_mask = number_at_distance > 0
        sum_by_object = np.sum(number_at_distance, 1)
        sum_by_object_per_bin = np.dstack([sum_by_object] * (bin_count + 1))[0]
        fraction_at_bin = number_at_distance / sum_by_object_per_bin
        mean_pixel_fraction = fraction_at_distance / (fraction_at_bin +
                                                      np.finfo(float).eps)
        masked_fraction_at_distance = masked_array(fraction_at_distance,
                                                   ~object_mask)
        masked_mean_pixel_fraction = masked_array(mean_pixel_fraction,
                                                  ~object_mask)
        # Anisotropy calculation.  Split each cell into eight wedges, then
        # compute coefficient of variation of the wedges' mean intensities
        # in each ring.
        #
        # Compute each pixel's delta from the center object's centroid
        i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]
        imask = i[good_mask] > i_center[good_mask]
        jmask = j[good_mask] > j_center[good_mask]
        absmask = (abs(i[good_mask] - i_center[good_mask]) >
                   abs(j[good_mask] - j_center[good_mask]))
        radial_index = (imask.astype(int) + jmask.astype(int) * 2 +
                        absmask.astype(int) * 4)
        statistics = []

        for bin in range(bin_count + (0 if wants_scaled else 1)):
            bin_mask = (good_mask & (bin_indexes == bin))
            bin_pixels = np.sum(bin_mask)
            bin_labels = labels[bin_mask]
            bin_radial_index = radial_index[bin_indexes[good_mask] == bin]
            labels_and_radii = (bin_labels - 1, bin_radial_index)
            radial_values = coo_matrix((pixel_data[bin_mask],
                                        labels_and_radii),
                                       (nobjects, 8)).toarray()
            pixel_count = coo_matrix((np.ones(bin_pixels), labels_and_radii),
                                     (nobjects, 8)).toarray()
            mask = pixel_count == 0
            radial_means = masked_array(radial_values / pixel_count, mask)
            radial_cv = np.std(radial_means, 1) / np.mean(radial_means, 1)
            radial_cv[np.sum(~mask, 1) == 0] = 0
            for measurement, feature, overflow_feature in (
                    (fraction_at_distance[:, bin], MF_FRAC_AT_D, OF_FRAC_AT_D),
                    (mean_pixel_fraction[:, bin], MF_MEAN_FRAC, OF_MEAN_FRAC),
                    (np.array(radial_cv), MF_RADIAL_CV, OF_RADIAL_CV)):

                if bin == bin_count:
                    measurement_name = overflow_feature % image_name
                else:
                    measurement_name = feature % (image_name, bin + 1, bin_count)
                measurements.add_measurement(object_name,
                                             measurement_name,
                                             measurement)
                if feature in heatmaps:
                    heatmaps[feature][bin_mask] = measurement[bin_labels - 1]
            radial_cv.mask = np.sum(~mask, 1) == 0
            bin_name = str(bin + 1) if bin < bin_count else "Overflow"
            statistics += [(image_name, object_name, bin_name, str(bin_count),
                            round(np.mean(masked_fraction_at_distance[:, bin]), 4),
                            round(np.mean(masked_mean_pixel_fraction[:, bin]), 4),
                            round(np.mean(radial_cv), 4))]
        return statistics
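
The per-object, per-bin totals in do_measurements() are built by scatter-adding pixel values into an (object, bin) table with scipy's coo_matrix. A self-contained miniature of that trick (all values and names here are made up for illustration):

# Miniature of the scatter-add above: sum pixel intensities per (object, bin)
# pair with a sparse COO matrix, then normalize each row to get the fraction
# of an object's intensity that falls in each radial bin (the idea behind FracAtD).
import numpy as np
from scipy.sparse import coo_matrix

nobjects, bin_count = 2, 3
obj = np.array([0, 0, 0, 1, 1])             # 0-based object index per pixel
bins = np.array([0, 0, 2, 1, 2])            # radial bin per pixel
intensity = np.array([.2, .3, .5, .4, .6])  # pixel intensity

histogram = coo_matrix((intensity, (obj, bins)),
                       shape=(nobjects, bin_count)).toarray()
frac_at_d = histogram / histogram.sum(axis=1, keepdims=True)
print(frac_at_d)   # [[0.5, 0.0, 0.5], [0.0, 0.4, 0.6]]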
    def run_on_objects(self, object_name, workspace):
        """Run, computing the area measurements for a single map of objects"""
        objects = workspace.get_objects(object_name)

        if len(objects.shape) == 2:
            #
            # Do the ellipse-related measurements
            #
            i, j, l = objects.ijv.transpose()
            centers, eccentricity, major_axis_length, minor_axis_length, \
            theta, compactness = \
                ellipse_from_second_moments_ijv(i, j, 1, l, objects.indices, True)
            del i
            del j
            del l
            self.record_measurement(workspace, object_name,
                                    F_ECCENTRICITY, eccentricity)
            self.record_measurement(workspace, object_name,
                                    F_MAJOR_AXIS_LENGTH, major_axis_length)
            self.record_measurement(workspace, object_name,
                                    F_MINOR_AXIS_LENGTH, minor_axis_length)
            self.record_measurement(workspace, object_name, F_ORIENTATION,
                                    theta * 180 / np.pi)
            self.record_measurement(workspace, object_name, F_COMPACTNESS,
                                    compactness)
            is_first = False
            if len(objects.indices) == 0:
                nobjects = 0
            else:
                nobjects = np.max(objects.indices)
            mcenter_x = np.zeros(nobjects)
            mcenter_y = np.zeros(nobjects)
            mextent = np.zeros(nobjects)
            mperimeters = np.zeros(nobjects)
            msolidity = np.zeros(nobjects)
            euler = np.zeros(nobjects)
            max_radius = np.zeros(nobjects)
            median_radius = np.zeros(nobjects)
            mean_radius = np.zeros(nobjects)
            min_feret_diameter = np.zeros(nobjects)
            max_feret_diameter = np.zeros(nobjects)
            zernike_numbers = self.get_zernike_numbers()
            zf = {}
            for n, m in zernike_numbers:
                zf[(n, m)] = np.zeros(nobjects)
            if nobjects > 0:
                chulls, chull_counts = convex_hull_ijv(objects.ijv, objects.indices)
                for labels, indices in objects.get_labels():
                    to_indices = indices - 1
                    distances = distance_to_edge(labels)
                    mcenter_y[to_indices], mcenter_x[to_indices] = \
                        maximum_position_of_labels(distances, labels, indices)
                    max_radius[to_indices] = fix(scind.maximum(
                            distances, labels, indices))
                    mean_radius[to_indices] = fix(scind.mean(
                            distances, labels, indices))
                    median_radius[to_indices] = median_of_labels(
                            distances, labels, indices)
                    #
                    # The extent (area / bounding box area)
                    #
                    mextent[to_indices] = calculate_extents(labels, indices)
                    #
                    # The perimeter distance
                    #
                    mperimeters[to_indices] = calculate_perimeters(labels, indices)
                    #
                    # Solidity
                    #
                    msolidity[to_indices] = calculate_solidity(labels, indices)
                    #
                    # Euler number
                    #
                    euler[to_indices] = euler_number(labels, indices)
                    #
                    # Zernike features
                    #
                    if self.calculate_zernikes.value:
                        zf_l = cpmz.zernike(zernike_numbers, labels, indices)
                        for (n, m), z in zip(zernike_numbers, zf_l.transpose()):
                            zf[(n, m)][to_indices] = z
                #
                # Form factor
                #
                ff = 4.0 * np.pi * objects.areas / mperimeters ** 2
                #
                # Feret diameter
                #
                min_feret_diameter, max_feret_diameter = \
                    feret_diameter(chulls, chull_counts, objects.indices)

            else:
                ff = np.zeros(0)

            for f, m in ([(F_AREA, objects.areas),
                          (F_CENTER_X, mcenter_x),
                          (F_CENTER_Y, mcenter_y),
                          (F_CENTER_Z, np.ones_like(mcenter_x)),
                          (F_EXTENT, mextent),
                          (F_PERIMETER, mperimeters),
                          (F_SOLIDITY, msolidity),
                          (F_FORM_FACTOR, ff),
                          (F_EULER_NUMBER, euler),
                          (F_MAXIMUM_RADIUS, max_radius),
                          (F_MEAN_RADIUS, mean_radius),
                          (F_MEDIAN_RADIUS, median_radius),
                          (F_MIN_FERET_DIAMETER, min_feret_diameter),
                          (F_MAX_FERET_DIAMETER, max_feret_diameter)] +
                             [(self.get_zernike_name((n, m)), zf[(n, m)])
                              for n, m in zernike_numbers]):
                self.record_measurement(workspace, object_name, f, m)
        else:
            labels = objects.segmented

            props = skimage.measure.regionprops(labels)

            # Area
            areas = [prop.area for prop in props]

            self.record_measurement(workspace, object_name, F_AREA, areas)

            # Extent
            extents = [prop.extent for prop in props]

            self.record_measurement(workspace, object_name, F_EXTENT, extents)

            # Centers of mass
            centers = objects.center_of_mass()

            center_z, center_y, center_x = centers.transpose()

            self.record_measurement(workspace, object_name, F_CENTER_X, center_x)

            self.record_measurement(workspace, object_name, F_CENTER_Y, center_y)

            self.record_measurement(workspace, object_name, F_CENTER_Z, center_z)

            # Perimeters
            perimeters = []

            for label in np.unique(labels):
                if label == 0:
                    continue

                volume = np.zeros_like(labels, dtype='bool')

                volume[labels == label] = True

                verts, faces, _, _ = skimage.measure.marching_cubes(
                    volume,
                    spacing=objects.parent_image.spacing if objects.has_parent_image else (1.0,) * labels.ndim,
                    level=0
                )

                perimeters += [skimage.measure.mesh_surface_area(verts, faces)]

            if len(perimeters) == 0:
                self.record_measurement(workspace, object_name, F_PERIMETER, [0])
            else:
                self.record_measurement(workspace, object_name, F_PERIMETER, perimeters)

            for feature in self.get_feature_names():
                if feature in [F_AREA, F_EXTENT, F_CENTER_X, F_CENTER_Y, F_CENTER_Z, F_PERIMETER]:
                    continue

                self.record_measurement(workspace, object_name, feature, [np.nan])
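
The 3-D branch above treats "perimeter" as surface area: each label becomes a binary volume, the volume is meshed with marching cubes, and the mesh area is summed. A stripped-down sketch of that step, assuming a scikit-image version whose marching_cubes returns four values as in the code above; the helper name and test volume are illustrative:

# Stripped-down sketch of the 3-D "perimeter" above: estimate the surface area
# of each labeled region by meshing it with marching cubes.
import numpy as np
import skimage.measure

def label_surface_areas(labels, spacing=(1.0, 1.0, 1.0)):
    areas = []
    for label in np.unique(labels):
        if label == 0:
            continue
        volume = labels == label
        verts, faces, _, _ = skimage.measure.marching_cubes(
            volume, level=0, spacing=spacing)
        areas.append(skimage.measure.mesh_surface_area(verts, faces))
    return areas

# Example: an 8x8x8 cube of label 1 inside a 12x12x12 volume
labels = np.zeros((12, 12, 12), int)
labels[2:10, 2:10, 2:10] = 1
print(label_surface_areas(labels))   # one value, on the order of 6 * 8 * 8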