Example #1
0
    def do_labels(self, labels):
        '''Apply the configured morphological operation to a labels matrix.

        labels - integer labels matrix where 0 is background.

        Returns the transformed labels matrix. Raises NotImplementedError
        if self.operation is not one of the supported choices.
        '''
        op = self.operation
        # Hole-filling is only offered for the two shrink operations.
        if op in (O_SHRINK, O_SHRINK_INF) and self.wants_fill_holes.value:
            labels = fill_labeled_holes(labels)

        if op == O_SHRINK_INF:
            return binary_shrink(labels)
        if op == O_SHRINK:
            return binary_shrink(labels, iterations=self.iterations.value)
        if op in (O_EXPAND, O_EXPAND_INF):
            if op == O_EXPAND_INF:
                # Larger than any possible distance in the image, so
                # every background pixel is within range.
                limit = np.max(labels.shape)
            else:
                limit = self.iterations.value
            bg = labels == 0
            # For each background pixel, find the distance to - and the
            # coordinates of - the nearest labeled pixel.
            dist, (ii, jj) = distance_transform_edt(bg, return_indices=True)
            expanded = labels.copy()
            near = bg & (dist <= limit)
            expanded[near] = labels[ii[near], jj[near]]
            return expanded
        if op == O_DIVIDE:
            # Zero out pixels that touch another label, but only where the
            # object would survive the thinning (must not disappear).
            touching = adjacent(labels)
            survives = binary_shrink(labels, 1) != 0
            divided = labels.copy()
            divided[touching & ~survives] = 0
            return divided
        if op == O_SKELETONIZE:
            return skeletonize_labels(labels)
        if op == O_SPUR:
            return spur(labels, iterations=self.iterations.value)
        raise NotImplementedError("Unsupported operation: %s" %
                                  self.operation.value)
Example #2
0
 def test_01_02_run(self):
     '''The module should put skeletonized objects into the object set.'''
     # Build a one-module pipeline around the module under test.
     module = instantiate_module(MODULE_NAME)
     module.input_objects_name.value = INPUT_OBJECTS_NAME
     module.output_objects_name.value = OUTPUT_OBJECTS_NAME
     module.module_num = 1
     pipeline = cpp.Pipeline()
     pipeline.add_module(module)

     #
     # Seeded random points, dilated via the distance transform and then
     # labeled, give a reproducible test segmentation.
     #
     rng = np.random.RandomState()
     rng.seed(12)
     seed_img = np.ones((100, 100), bool)
     seed_img[rng.randint(0, 100, 50), rng.randint(0, 100, 50)] = False
     labels, _ = label(distance_transform_edt(seed_img) <= 5)
     #
     # Wrap the labels matrix as the input objects.
     #
     object_set = cpo.ObjectSet()
     input_objects = cpo.Objects()
     input_objects.segmented = labels
     object_set.add_objects(input_objects, INPUT_OBJECTS_NAME)
     expected = skeletonize_labels(labels)
     #
     # Assemble the workspace and run the module.
     #
     workspace = cpw.Workspace(pipeline, module, None, object_set,
                               cpmeas.Measurements(), None)
     module.run(workspace)

     self.assertTrue(OUTPUT_OBJECTS_NAME in object_set.object_names,
                     "Could not find the output objects in the object set")
     output_objects = object_set.get_objects(OUTPUT_OBJECTS_NAME)
     np.testing.assert_array_equal(expected, output_objects.segmented)
Example #3
0
 def run(self, workspace):
     '''Skeletonize the input objects and add the result to the object set.'''
     # Deferred import: example4b.py is itself imported while
     # cellprofiler.modules is being imported, so importing
     # cellprofiler.modules.identify at module scope would be circular.
     import cellprofiler.modules.identify as I
     #
     # The object_set tracks the objects produced during one cycle.
     #
     # Key methods:
     #   object_set.get_objects(name) -> a cpo.Objects instance
     #   object_set.add_objects(objects, name) -> registers objects under
     #           the given name
     #
     # Creating new objects takes three steps:
     #   build a labels matrix
     #   wrap it: objects = cpo.Objects()
     #   assign:  objects.segmented = labels
     #
     # Richer metadata can be supplied as well, but it is optional - see
     # the later exercises for how to be "nicer".
     #
     object_set = workspace.object_set
     source_objects = object_set.get_objects(self.input_objects_name.value)
     skeleton = skeletonize_labels(source_objects.segmented)
     result_objects = cpo.Objects()
     result_objects.segmented = skeleton
     result_name = self.output_objects_name.value
     object_set.add_objects(result_objects, result_name)

     ##measurements = workspace.measurements
     #
     # cpo.Objects calculates and caches several useful properties; count
     # and area are the most frequently used. count is the number of
     # objects in the segmentation.
     #
     ##n_objects = result_objects.count
     #
     # cellprofiler.modules.identify has helpers that record measurements
     # in the standardized form. add_object_count_measurements adds the
     # single measurement Count_<objects-name>.
     #
     ##I.add_object_count_measurements(measurements, result_name,
     ##                                n_objects)
     #
     # add_object_location_measurements computes each object's center of
     # mass in the labels matrix and records Location_Center_X and
     # Location_Center_Y. Data-mining tools such as CellProfiler Analyst
     # use these to center an image on a particular cell.
     #
     ##I.add_object_location_measurements(
     ##    measurements, result_name, skeleton, n_objects)
     if workspace.show_frame:
         workspace.display_data.input_labels = source_objects.segmented
         workspace.display_data.output_labels = skeleton
Example #4
0
    def test_01_02_run(self):
        '''Run the module; verify the objects, Count and Location measurements.

        Fixes two shadowing bugs in the original: the for-loop variable
        rebound `expected` (the skeletonized-labels array), and the
        measurement count rebound `count` (the label count).
        '''
        module = instantiate_module(MODULE_NAME)
        module.input_objects_name.value = INPUT_OBJECTS_NAME
        module.output_objects_name.value = OUTPUT_OBJECTS_NAME
        module.module_num = 1
        pipeline = cpp.Pipeline()
        pipeline.add_module(module)

        object_set = cpo.ObjectSet()
        #
        # Pick a bunch of random points, dilate them using the distance
        # transform and then label the result.
        #
        r = np.random.RandomState()
        r.seed(12)
        bimg = np.ones((100, 100), bool)
        bimg[r.randint(0, 100, 50), r.randint(0, 100, 50)] = False
        labels, count = label(distance_transform_edt(bimg) <= 5)
        #
        # Make the input objects
        #
        input_objects = cpo.Objects()
        input_objects.segmented = labels
        expected = skeletonize_labels(labels)
        object_set.add_objects(input_objects, INPUT_OBJECTS_NAME)
        #
        # Make the workspace
        #
        measurements = cpmeas.Measurements()
        workspace = cpw.Workspace(pipeline, module, None, object_set,
                                  measurements, None)
        module.run(workspace)
        #
        # Calculate the centers using Numpy. Scipy can do this too.
        # But maybe it's instructive to show you how to go at the labels
        # matrix using Numpy.
        #
        # We're going to get the centroids by taking the average value
        # of x and y per object.
        #
        y, x = np.mgrid[0:labels.shape[0], 0:labels.shape[1]].astype(float)
        #
        # np.bincount counts the number of occurrences of each integer value.
        # You need to operate on a 1d array - if you flatten the labels
        # and weights, their pixels still align.
        #
        # We do [1:] to discard the background which is labeled 0
        #
        # The optional second argument to np.bincount is the "weight". For
        # each label value, maintain a running sum of the weights.
        #
        areas = np.bincount(expected.flatten())[1:]
        total_x = np.bincount(expected.flatten(), weights=x.flatten())[1:]
        total_y = np.bincount(expected.flatten(), weights=y.flatten())[1:]
        expected_location_x = total_x / areas
        expected_location_y = total_y / areas
        #
        # Now check against the measurements.
        #
        count_feature = I.C_COUNT + "_" + OUTPUT_OBJECTS_NAME
        self.assertTrue(
            measurements.has_feature(cpmeas.IMAGE, count_feature),
            "Your module did not produce a %s measurement" % count_feature)
        # Fresh name: rebinding `count` here would shadow the label count
        # computed above.
        measured_count = measurements.get_measurement(cpmeas.IMAGE,
                                                      count_feature)
        self.assertEqual(measured_count, len(areas))
        # Loop variable renamed from `expected` so it does not clobber the
        # expected skeleton array computed earlier in this test.
        for ftr, expected_location in (
                (I.M_LOCATION_CENTER_X, expected_location_x),
                (I.M_LOCATION_CENTER_Y, expected_location_y)):
            self.assertTrue(measurements.has_feature(OUTPUT_OBJECTS_NAME, ftr))
            location = measurements.get_measurement(OUTPUT_OBJECTS_NAME, ftr)
            np.testing.assert_almost_equal(location, expected_location)
Example #5
0
 def test_01_02_run(self):
     '''Run the module; verify the objects, Count and Location measurements.

     Fixes two shadowing bugs in the original: the for-loop variable
     rebound `expected` (the skeletonized-labels array), and the
     measurement count rebound `count` (the label count).
     '''
     module = instantiate_module(MODULE_NAME)
     module.input_objects_name.value = INPUT_OBJECTS_NAME
     module.output_objects_name.value = OUTPUT_OBJECTS_NAME
     module.module_num = 1
     pipeline = cpp.Pipeline()
     pipeline.add_module(module)

     object_set = cpo.ObjectSet()
     #
     # Pick a bunch of random points, dilate them using the distance
     # transform and then label the result.
     #
     r = np.random.RandomState()
     r.seed(12)
     bimg = np.ones((100, 100), bool)
     bimg[r.randint(0, 100, 50), r.randint(0, 100, 50)] = False
     labels, count = label(distance_transform_edt(bimg) <= 5)
     #
     # Make the input objects
     #
     input_objects = cpo.Objects()
     input_objects.segmented = labels
     expected = skeletonize_labels(labels)
     object_set.add_objects(input_objects, INPUT_OBJECTS_NAME)
     #
     # Make the workspace
     #
     measurements = cpmeas.Measurements()
     workspace = cpw.Workspace(pipeline, module, None, object_set,
                               measurements, None)
     module.run(workspace)
     #
     # Calculate the centers using Numpy. Scipy can do this too.
     # But maybe it's instructive to show you how to go at the labels
     # matrix using Numpy.
     #
     # We're going to get the centroids by taking the average value
     # of x and y per object.
     #
     y, x = np.mgrid[0:labels.shape[0], 0:labels.shape[1]].astype(float)
     #
     # np.bincount counts the number of occurrences of each integer value.
     # You need to operate on a 1d array - if you flatten the labels
     # and weights, their pixels still align.
     #
     # We do [1:] to discard the background which is labeled 0
     #
     # The optional second argument to np.bincount is the "weight". For
     # each label value, maintain a running sum of the weights.
     #
     areas = np.bincount(expected.flatten())[1:]
     total_x = np.bincount(expected.flatten(), weights=x.flatten())[1:]
     total_y = np.bincount(expected.flatten(), weights=y.flatten())[1:]
     expected_location_x = total_x / areas
     expected_location_y = total_y / areas
     #
     # Now check against the measurements.
     #
     count_feature = I.C_COUNT + "_" + OUTPUT_OBJECTS_NAME
     self.assertTrue(measurements.has_feature(cpmeas.IMAGE, count_feature),
                     "Your module did not produce a %s measurement" %
                     count_feature)
     # Fresh name: rebinding `count` here would shadow the label count
     # computed above.
     measured_count = measurements.get_measurement(cpmeas.IMAGE,
                                                   count_feature)
     self.assertEqual(measured_count, len(areas))
     # Loop variable renamed from `expected` so it does not clobber the
     # expected skeleton array computed earlier in this test.
     for ftr, expected_location in (
             (I.M_LOCATION_CENTER_X, expected_location_x),
             (I.M_LOCATION_CENTER_Y, expected_location_y)):
         self.assertTrue(measurements.has_feature(
             OUTPUT_OBJECTS_NAME, ftr))
         location = measurements.get_measurement(OUTPUT_OBJECTS_NAME, ftr)
         np.testing.assert_almost_equal(location, expected_location)