Example #1
 def post_group(self, workspace, grouping):
     if self.tile_method == T_ACROSS_CYCLES:
         image_set = workspace.image_set
         if self.output_image.value not in image_set.names:
             d = self.get_dictionary(workspace.image_set_list)
             image_set.add(self.output_image.value,
                           cpi.Image(d[TILED_IMAGE]))
Example #2
    def make_workspace(self, images):
        """Make a workspace """
        module = S.StackImages()
        pipeline = cpp.Pipeline()
        object_set = cpo.ObjectSet()
        image_set_list = cpi.ImageSetList()
        image_set = image_set_list.get_image_set(0)
        workspace = cpw.Workspace(
            pipeline,
            module,
            image_set,
            object_set,
            cpmeas.Measurements(),
            image_set_list,
        )

        # setup the input images
        names = [INPUT_IMAGE_BASENAME + str(i) for i, img in enumerate(images)]
        for img, nam in zip(images, names):
            image_set.add(nam, cpi.Image(img))

        # setup the input images settings
        module.stack_image_name.value = OUTPUT_IMAGE_NAME
        nimgs = len(images)
        while len(module.stack_channels) < nimgs:
            module.add_stack_channel_cb()
        for sc, imname in zip(module.stack_channels, names):
            sc.image_name.value = imname

        return workspace, module
Example #3
 def provide_image(self, image_set):
     image_count = self.__image_count
     mask_2d = image_count > 0
     if self.__how_to_accumulate == P_VARIANCE:
         ndim_image = self.__vsquared
     elif self.__how_to_accumulate == P_POWER:
         ndim_image = self.__power_image
     elif self.__how_to_accumulate == P_BRIGHTFIELD:
         ndim_image = self.__bright_max
     else:
         ndim_image = self.__image
     if ndim_image.ndim == 3:
         image_count = np.dstack([image_count] * ndim_image.shape[2])
     mask = image_count > 0
     if self.__cached_image is not None:
         return self.__cached_image
     if self.__how_to_accumulate == P_AVERAGE:
         cached_image = self.__image / image_count
     elif self.__how_to_accumulate == P_VARIANCE:
         cached_image = np.zeros(self.__vsquared.shape, np.float32)
         cached_image[mask] = self.__vsquared[mask] / image_count[mask]
         cached_image[mask] -= self.__vsum[mask]**2 / (image_count[mask]**2)
     elif self.__how_to_accumulate == P_POWER:
         cached_image = np.zeros(image_count.shape, np.complex128)
         cached_image[mask] = self.__power_image[mask]
         cached_image[mask] -= (self.__vsum[mask] *
                                self.__power_mask[mask] / image_count[mask])
         cached_image = (cached_image * np.conj(cached_image)).real.astype(
             np.float32)
     elif self.__how_to_accumulate == P_BRIGHTFIELD:
         cached_image = np.zeros(image_count.shape, np.float32)
         cached_image[
             mask] = self.__bright_max[mask] - self.__bright_min[mask]
     elif self.__how_to_accumulate == P_MINIMUM and np.any(~mask):
         cached_image = self.__image.copy()
         cached_image[~mask] = 0
     else:
         cached_image = self.__image
     cached_image[~mask] = 0
     if np.all(mask) or self.__how_to_accumulate == P_MASK:
         self.__cached_image = cpi.Image(cached_image)
     else:
         self.__cached_image = cpi.Image(cached_image, mask=mask_2d)
     return self.__cached_image
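
A standalone sketch of the P_VARIANCE accumulator above: keep per-pixel running sums of the values and their squares, then recover the population variance as E[x^2] - E[x]^2. The array sizes and image count here are invented for illustration.

import numpy as np

rng = np.random.default_rng(0)
images = [rng.random((4, 4)).astype(np.float32) for _ in range(5)]

vsum = np.zeros((4, 4), np.float64)      # running sum of pixel values
vsquared = np.zeros((4, 4), np.float64)  # running sum of squared values
count = 0
for img in images:
    vsum += img
    vsquared += img ** 2
    count += 1

# Population variance from the two accumulators: E[x^2] - E[x]^2.
variance = vsquared / count - (vsum / count) ** 2
assert np.allclose(variance, np.var(np.stack(images), axis=0))
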
Example #4
    def run_summarize(self, workspace, image, fkt, **kwargs):
        """Combine images to make a grayscale one"""
        input_image = image.pixel_data
        output_image = fkt(input_image, **kwargs)
        image = cpi.Image(output_image, parent_image=image)
        workspace.image_set.add(self.grayscale_name.value, image)

        workspace.display_data.input_image = input_image
        workspace.display_data.output_image = output_image
Example #5
 def run(self, workspace):
     """do the image analysis"""
     if self.tile_method == T_WITHIN_CYCLES:
         output_pixels = self.place_adjacent(workspace)
     else:
         output_pixels = self.tile(workspace)
     output_image = cpi.Image(output_pixels)
     workspace.image_set.add(self.output_image.value, output_image)
     if self.show_window:
         workspace.display_data.image = output_pixels
Example #6
    def run_split(self, workspace, image):
        """Split image into individual components
        """
        input_image = image.pixel_data
        disp_collection = []
        if self.rgb_or_channels in (CH_RGB, CH_CHANNELS):
            for index, name, title in self.channels_and_image_names():
                output_image = input_image[:, :, index]
                workspace.image_set.add(
                    name, cpi.Image(output_image, parent_image=image))
                disp_collection.append([output_image, name])
        elif self.rgb_or_channels == CH_HSV:
            output_image = matplotlib.colors.rgb_to_hsv(input_image)
            for index, name, title in self.channels_and_image_names():
                workspace.image_set.add(
                    name,
                    cpi.Image(output_image[:, :, index], parent_image=image))
                disp_collection.append([output_image[:, :, index], name])

        workspace.display_data.input_image = input_image
        workspace.display_data.disp_collection = disp_collection
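
The CH_HSV branch relies on matplotlib's rgb_to_hsv, which accepts any float RGB array in [0, 1]. A minimal sketch of splitting its output into per-channel planes:

import numpy as np
import matplotlib.colors

rgb = np.random.default_rng(1).random((8, 8, 3))  # float RGB in [0, 1)
hsv = matplotlib.colors.rgb_to_hsv(rgb)
# Each plane of the HSV array is a grayscale image in its own right.
for index, name in enumerate("HSV"):
    plane = hsv[:, :, index]
    print(name, plane.shape, float(plane.min()), float(plane.max()))
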
Example #7
def make_workspace(image, mask):
    """Make a workspace for testing FilterByObjectMeasurement"""
    module = S.SmoothMultichannel()
    pipeline = cpp.Pipeline()
    object_set = cpo.ObjectSet()
    image_set_list = cpi.ImageSetList()
    image_set = image_set_list.get_image_set(0)
    workspace = cpw.Workspace(pipeline, module, image_set, object_set,
                              cpmeas.Measurements(), image_set_list)
    image_set.add(INPUT_IMAGE_NAME, cpi.Image(image, mask, scale=1))
    module.image_name.value = INPUT_IMAGE_NAME
    module.filtered_image_name.value = OUTPUT_IMAGE_NAME
    return workspace, module
Example #8
 def run_split(self, workspace, objmask, main_id, name):
     """Split image into individual components"""
     segmented = objmask
     is_main = segmented == main_id
     is_bg = segmented == 0
     is_other = ~(is_bg | is_main)
     imgs = [is_main, is_other, is_bg]
     bin_stack = np.stack(imgs, axis=2)
     workspace.image_set.add(name, cpi.Image(bin_stack, convert=True))
     disp_collection = [[
         img, tit
     ] for img, tit in zip(imgs, ["is main", "is other", "is bg"])]
     workspace.display_data.input_image = segmented
     workspace.display_data.disp_collection = disp_collection
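
A miniature version of the main/other/background decomposition above, using a small made-up label image: the three boolean planes partition every pixel exactly once.

import numpy as np

labels = np.array([[0, 1, 1],
                   [2, 2, 0],
                   [0, 3, 3]])
main_id = 2
is_main = labels == main_id
is_bg = labels == 0
is_other = ~(is_main | is_bg)
bin_stack = np.stack([is_main, is_other, is_bg], axis=2)
# The three planes partition the image: each pixel is True exactly once.
assert np.all(bin_stack.sum(axis=2) == 1)
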
Example #9
    def run(self, workspace):
        image_set = workspace.image_set
        if self.source_choice == IO_OBJECTS:
            objects = workspace.get_objects(self.object_name.value)
            labels = objects.segmented
            if self.invert_mask.value:
                mask = labels == 0
            else:
                mask = labels > 0
        else:
            objects = None
            try:
                mask = image_set.get_image(
                    self.masking_image_name.value, must_be_binary=True
                ).pixel_data
            except ValueError:
                mask = image_set.get_image(
                    self.masking_image_name.value, must_be_grayscale=True
                ).pixel_data
                mask = mask > 0.5
            if self.invert_mask.value:
                mask = mask == 0
        orig_image = image_set.get_image(self.image_name.value)
        if (
            orig_image.multichannel and mask.shape != orig_image.pixel_data.shape[:-1]
        ) or mask.shape != orig_image.pixel_data.shape:
            tmp = np.zeros(orig_image.pixel_data.shape[:2], mask.dtype)
            tmp[mask] = True
            mask = tmp
        if orig_image.has_mask:
            mask = np.logical_and(mask, orig_image.mask)
        masked_pixels = orig_image.pixel_data.copy()
        masked_pixels[np.logical_not(mask)] = 0
        masked_image = cpi.Image(
            masked_pixels,
            mask=mask,
            parent_image=orig_image,
            masking_objects=objects,
            dimensions=orig_image.dimensions,
        )

        image_set.add(self.masked_image_name.value, masked_image)

        if self.show_window:
            workspace.display_data.dimensions = orig_image.dimensions
            workspace.display_data.orig_image_pixel_data = orig_image.pixel_data
            workspace.display_data.masked_pixels = masked_pixels
            workspace.display_data.multichannel = orig_image.multichannel
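
The core masking step in isolation, assuming a grayscale image and a same-shaped boolean mask: pixels outside the mask are zeroed, mirroring what this module stores alongside the mask itself.

import numpy as np

image = np.random.default_rng(2).random((5, 5))
mask = np.zeros((5, 5), bool)
mask[1:4, 1:4] = True  # keep only the central 3x3 block

masked_pixels = image.copy()
masked_pixels[np.logical_not(mask)] = 0
assert np.all(masked_pixels[~mask] == 0)
assert np.all(masked_pixels[mask] == image[mask])
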
Example #10
    def run_combine(self, workspace, image):
        """Combine images to make a grayscale one
        """
        input_image = image.pixel_data
        channels, contributions = list(zip(*self.channels_and_contributions()))
        denominator = sum(contributions)
        channels = np.array(channels, int)
        contributions = np.array(contributions) / denominator

        output_image = np.sum(
            input_image[:, :, channels] *
            contributions[np.newaxis, np.newaxis, :], 2)
        image = cpi.Image(output_image, parent_image=image)
        workspace.image_set.add(self.grayscale_name.value, image)

        workspace.display_data.input_image = input_image
        workspace.display_data.output_image = output_image
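
The weighted-combination arithmetic of run_combine as a standalone sketch, with made-up contribution weights: contributions are normalized to sum to 1 and applied along the channel axis.

import numpy as np

rgb = np.random.default_rng(3).random((6, 6, 3))
contributions = np.array([0.5, 0.3, 0.2])      # made-up channel weights
contributions = contributions / contributions.sum()
gray = np.sum(rgb * contributions[np.newaxis, np.newaxis, :], 2)
assert gray.shape == (6, 6)
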
Example #11
    def run(self, workspace):
        image = workspace.image_set.get_image(self.image_name.value,
                                              must_be_grayscale=False)

        output_pixels = image.pixel_data.copy()
        output_pixels = output_pixels.astype(float)  # np.float was removed in NumPy 1.24
        if len(image.pixel_data.shape) == 3:
            for i in range(image.pixel_data.shape[2]):
                output_pixels[:, :, i] = self.run_per_layer(image, i)
        else:
            output_pixels = self.run_per_layer(image, -1)

        output_image = cpi.Image(output_pixels, parent_image=image)
        workspace.image_set.add(self.transformed_image_name.value,
                                output_image)
        workspace.display_data.pixel_data = image.pixel_data
        workspace.display_data.output_pixels = output_pixels
Example #12
def make_workspace(image, outlier_percentile):
    """Make a workspace """
    module = C.ClipRange()
    pipeline = cpp.Pipeline()
    object_set = cpo.ObjectSet()
    image_set_list = cpi.ImageSetList()
    image_set = image_set_list.get_image_set(0)
    workspace = cpw.Workspace(pipeline, module, image_set, object_set,
                              cpmeas.Measurements(), image_set_list)

    # setup the input images
    image_set.add(INPUT_IMAGE_NAME, cpi.Image(image))

    # setup the input images settings
    module.x_name.value = INPUT_IMAGE_NAME
    module.y_name.value = OUTPUT_IMAGE_NAME
    module.outlier_percentile.value = outlier_percentile

    return workspace, module
Example #13
    def run(self, workspace):
        imgset = workspace.image_set
        input_image_names = [sc.image_name.value for sc in self.stack_channels]
        channel_names = input_image_names
        source_channels = [
            imgset.get_image(name, must_be_grayscale=False).pixel_data
            for name in input_image_names
        ]
        parent_image = imgset.get_image(input_image_names[0])
        for idx, pd in enumerate(source_channels):
            if pd.shape[:2] != source_channels[0].shape[:2]:
                raise ValueError(
                    "The %s image and %s image have different sizes (%s vs %s)"
                    % (
                        self.stack_channels[0].image_name.value,
                        self.stack_channels[idx].image_name.value,
                        source_channels[0].shape[:2],
                        pd.shape[:2],
                    ))
        stack_pixel_data = np.dstack(source_channels)

        ##############
        # Save image #
        ##############
        stack_image = cpi.Image(stack_pixel_data, parent_image=parent_image)
        stack_image.channel_names = channel_names
        imgset.add(self.stack_image_name.value, stack_image)

        ##################
        # Display images #
        ##################
        if self.show_window:
            workspace.display_data.input_image_names = input_image_names
            workspace.display_data.stack_pixel_data = stack_pixel_data
            workspace.display_data.images = [
                imgset.get_image(name, must_be_grayscale=False).pixel_data
                for name in input_image_names
            ]
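
A sketch of the stacking step itself: once the shape check passes, np.dstack simply concatenates same-sized 2-D planes along a new third axis.

import numpy as np

planes = [np.full((4, 4), i, np.float32) for i in range(3)]
stack = np.dstack(planes)
assert stack.shape == (4, 4, 3)
assert np.all(stack[:, :, 1] == 1)  # each input becomes one channel
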
Example #14
 def run(self, workspace):
     image = workspace.image_set.get_image(self.image_name.value)
     if image.has_mask:
         mask = image.mask
     else:
         mask = None
     pixel_data = image.pixel_data
     if pixel_data.ndim == 3:
          if any(
                  np.any(pixel_data[:, :, 0] != pixel_data[:, :, plane])
                  for plane in range(1, pixel_data.shape[2])
          ):
              logger.warning("Image is color, converting to grayscale")
         pixel_data = np.sum(pixel_data, 2) / pixel_data.shape[2]
     for function in self.functions:
         pixel_data = self.run_function(function, pixel_data, mask)
     new_image = cpi.Image(pixel_data, parent_image=image)
     workspace.image_set.add(self.output_image_name.value, new_image)
     if self.show_window:
         workspace.display_data.image = image.pixel_data
         workspace.display_data.pixel_data = pixel_data
Example #15
 def run(self, workspace):
     image = workspace.image_set.get_image(
         self.image_name.value, must_be_grayscale=False
     )
     hp_threshold = self.hp_threshold.value
      if self.scale_hp_threshold.value:
         hp_threshold /= image.scale
     if len(image.pixel_data.shape) == 3:
         if self.smoothing_method.value == CLIP_HOT_PIXELS:
             # TODO support masks
             hp_filter_shape = (
                 self.hp_filter_size.value,
                 self.hp_filter_size.value,
                 1,
             )
             output_pixels = SmoothMultichannel.clip_hot_pixels(
                 image.pixel_data, hp_filter_shape, hp_threshold
             )
         else:
             output_pixels = image.pixel_data.copy()
             for channel in range(image.pixel_data.shape[2]):
                 output_pixels[:, :, channel] = self.run_grayscale(
                     image.pixel_data[:, :, channel], image
                 )
     else:
         if self.smoothing_method.value == CLIP_HOT_PIXELS:
             # TODO support masks
             hp_filter_shape = (self.hp_filter_size.value, self.hp_filter_size.value)
             output_pixels = SmoothMultichannel.clip_hot_pixels(
                 image.pixel_data, hp_filter_shape, hp_threshold
             )
         else:
             output_pixels = self.run_grayscale(image.pixel_data, image)
     output_image = cpi.Image(output_pixels, parent_image=image)
     workspace.image_set.add(self.filtered_image_name.value, output_image)
     workspace.display_data.pixel_data = image.pixel_data
     workspace.display_data.output_pixels = output_pixels
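
clip_hot_pixels is not shown here; a plausible sketch of the usual definition (this is an assumption, not the module's actual source) replaces any pixel that exceeds its local median by more than a threshold:

import numpy as np
import scipy.ndimage as scind

img = np.random.default_rng(5).random((16, 16)).astype(np.float32)
img[8, 8] = 10.0  # plant a hot pixel
local_median = scind.median_filter(img, size=3)
hp_threshold = 1.0  # hypothetical threshold
clipped = np.where(img - local_median > hp_threshold, local_median, img)
assert clipped[8, 8] < 10.0  # the hot pixel was clipped to its neighborhood
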
Example #16
 def run_image(self, image, workspace):
     #
     # Get the image names from the settings
     #
     image_name = image.image_name.value
     spill_correct_name = image.spill_correct_function_image_name.value
     corrected_image_name = image.corrected_image_name.value
     #
     # Get images from the image set
     #
     orig_image = workspace.image_set.get_image(image_name)
     spillover_mat = workspace.image_set.get_image(spill_correct_name)
     #
     # Either divide or subtract the illumination image from the original
     #
     method = image.spill_correct_method.value
     output_pixels = self.compensate_image_ls(orig_image.pixel_data,
                                              spillover_mat.pixel_data,
                                              method)
     # Save the output image in the image set and have it inherit
     # mask & cropping from the original image.
     #
     output_image = cpi.Image(output_pixels, parent_image=orig_image)
     workspace.image_set.add(corrected_image_name, output_image)
     #
     # Save images for display
     #
      if self.show_window:
          if not hasattr(workspace.display_data, "images"):
              workspace.display_data.images = {}
          # Record each image unconditionally; only the dict creation is
          # guarded, so later channels are not silently dropped.
          workspace.display_data.images[image_name] = orig_image.pixel_data
          workspace.display_data.images[corrected_image_name] = output_pixels
          workspace.display_data.images[spill_correct_name] = \
              spillover_mat.pixel_data
Example #17
 def run_on_output(self, workspace, input_image, output):
     """Produce one image - storing it in the image set"""
     input_pixels = input_image.pixel_data
     inverse_absorbances = self.get_inverse_absorbances(output)
     #########################################
     #
     # Renormalize to control for the other stains
     #
     # Log transform the image data
     #
     # First, rescale it a little to offset it from zero
     #
     eps = 1.0 / 256.0 / 2.0
     image = input_pixels + eps
     log_image = np.log(image)
     #
     # Now multiply the log-transformed image
     #
     scaled_image = log_image * inverse_absorbances[np.newaxis,
                                                    np.newaxis, :]
     #
     # Exponentiate to get the image without the dye effect
     #
     image = np.exp(np.sum(scaled_image, 2))
     #
     # and subtract out the epsilon we originally introduced
     #
     image -= eps
     image[image < 0] = 0
     image[image > 1] = 1
     image = 1 - image
     image_name = output.image_name.value
     output_image = cpi.Image(image, parent_image=input_image)
     workspace.image_set.add(image_name, output_image)
     if self.show_window:
         workspace.display_data.outputs[image_name] = image
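
The renormalization above reduced to its arithmetic, with a made-up inverse-absorbance vector: offset from zero, log-transform, weight per channel, sum, exponentiate back, and invert.

import numpy as np

input_pixels = np.random.default_rng(4).random((4, 4, 3))
inverse_absorbances = np.array([-0.6, -0.8, -0.2])  # hypothetical stain values

eps = 1.0 / 256.0 / 2.0                   # offset so the log is defined at 0
log_image = np.log(input_pixels + eps)
scaled = log_image * inverse_absorbances[np.newaxis, np.newaxis, :]
image = np.exp(np.sum(scaled, 2)) - eps   # back out of log space
image = 1 - np.clip(image, 0, 1)
assert image.shape == (4, 4)
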
Example #18
    def run(self, workspace):
        parent_image = None
        parent_image_name = None
        imgset = workspace.image_set
        rgb_pixel_data = None
        input_image_names = []
        channel_names = []
        if self.scheme_choice not in (SCHEME_STACK, SCHEME_COMPOSITE):
            for color_scheme_setting in self.color_scheme_settings:
                if color_scheme_setting.image_name.is_blank:
                    channel_names.append("Blank")
                    continue
                image_name = color_scheme_setting.image_name.value
                input_image_names.append(image_name)
                channel_names.append(image_name)
                image = imgset.get_image(image_name, must_be_grayscale=True)
                multiplier = (color_scheme_setting.intensities *
                              color_scheme_setting.adjustment_factor.value)
                pixel_data = image.pixel_data
                if parent_image is not None:
                    if parent_image.pixel_data.shape != pixel_data.shape:
                        raise ValueError(
                            "The %s image and %s image have different sizes (%s vs %s)"
                            % (
                                parent_image_name,
                                color_scheme_setting.image_name.value,
                                parent_image.pixel_data.shape,
                                image.pixel_data.shape,
                            ))
                    rgb_pixel_data += np.dstack([pixel_data] * 3) * multiplier
                else:
                    parent_image = image
                    parent_image_name = color_scheme_setting.image_name.value
                    rgb_pixel_data = np.dstack([pixel_data] * 3) * multiplier
        else:
            input_image_names = [
                sc.image_name.value for sc in self.stack_channels
            ]
            channel_names = input_image_names
            source_channels = [
                imgset.get_image(name, must_be_grayscale=True).pixel_data
                for name in input_image_names
            ]
            parent_image = imgset.get_image(input_image_names[0])
            for idx, pd in enumerate(source_channels):
                if pd.shape != source_channels[0].shape:
                    raise ValueError(
                        "The %s image and %s image have different sizes (%s vs %s)"
                        % (
                            self.stack_channels[0].image_name.value,
                            self.stack_channels[idx].image_name.value,
                            source_channels[0].shape,
                            pd.shape,
                        ))
            if self.scheme_choice == SCHEME_STACK:
                rgb_pixel_data = np.dstack(source_channels)
            else:
                colors = []
                for sc in self.stack_channels:
                    color_tuple = sc.color.to_rgb()
                    color = (sc.weight.value * np.array(color_tuple).astype(
                        parent_image.pixel_data.dtype) / 255)
                    colors.append(color[np.newaxis, np.newaxis, :])
                rgb_pixel_data = (
                    parent_image.pixel_data[:, :, np.newaxis] * colors[0])
                for image, color in zip(source_channels[1:], colors[1:]):
                    rgb_pixel_data = (
                        rgb_pixel_data + image[:, :, np.newaxis] * color)

        ##############
        # Save image #
        ##############
        rgb_image = cpi.Image(rgb_pixel_data, parent_image=parent_image)
        rgb_image.channel_names = channel_names
        imgset.add(self.rgb_image_name.value, rgb_image)

        ##################
        # Display images #
        ##################
        if self.show_window:
            workspace.display_data.input_image_names = input_image_names
            workspace.display_data.rgb_pixel_data = rgb_pixel_data
            workspace.display_data.images = [
                imgset.get_image(name, must_be_grayscale=True).pixel_data
                for name in input_image_names
            ]
Example #19
    def run_single_measurement(self, group, workspace):
        """Classify objects based on one measurement"""
        object_name = group.object_name.value
        feature = group.measurement.value
        objects = workspace.object_set.get_objects(object_name)
        measurements = workspace.measurements
        values = measurements.get_current_measurement(object_name, feature)
        #
        # Pad values if too few (defensive programming).
        #
        if len(values) < objects.count:
            values = np.hstack(
                (values, [np.nan] * (objects.count - len(values))))
        if group.bin_choice == BC_EVEN:
            low_threshold = group.low_threshold.value
            high_threshold = group.high_threshold.value
            bin_count = group.bin_count.value
            thresholds = (np.arange(bin_count + 1) *
                          (high_threshold - low_threshold) / float(bin_count) +
                          low_threshold)
        else:
            thresholds = [
                float(x.strip())
                for x in group.custom_thresholds.value.split(",")
            ]
        #
        # Put infinities at either end of the thresholds so we can bin the
        # low and high bins
        #
        thresholds = np.hstack((
            [-np.inf] if group.wants_low_bin else [],
            thresholds,
            [np.inf] if group.wants_high_bin else [],
        ))
        #
        # Do a cross-product of objects and threshold comparisons
        #
        ob_idx, th_idx = np.mgrid[0:len(values), 0:len(thresholds) - 1]
        bin_hits = (values[ob_idx] > thresholds[th_idx]) & (
            values[ob_idx] <= thresholds[th_idx + 1])
        num_values = len(values)
        for bin_idx, feature_name in enumerate(group.bin_feature_names()):
            measurement_name = "_".join((M_CATEGORY, feature_name))
            measurements.add_measurement(object_name, measurement_name,
                                         bin_hits[:, bin_idx].astype(int))
            measurement_name = "_".join(
                (M_CATEGORY, feature_name, F_NUM_PER_BIN))
            num_hits = bin_hits[:, bin_idx].sum()
            measurements.add_measurement(cpmeas.IMAGE, measurement_name,
                                         num_hits)
            measurement_name = "_".join(
                (M_CATEGORY, feature_name, F_PCT_PER_BIN))
            measurements.add_measurement(
                cpmeas.IMAGE,
                measurement_name,
                100.0 * float(num_hits) / num_values if num_values > 0 else 0,
            )
        if group.wants_images or self.show_window:
            colors = self.get_colors(bin_hits.shape[1])
            object_bins = np.sum(bin_hits * th_idx, 1) + 1
            object_color = np.hstack(([0], object_bins))
            object_color[np.hstack((False, np.isnan(values)))] = 0
            labels = object_color[objects.segmented]
            if group.wants_images:
                image = colors[labels, :3]
                workspace.image_set.add(
                    group.image_name.value,
                    cpi.Image(image, parent_image=objects.parent_image),
                )

            if self.show_window:
                workspace.display_data.bins.append(
                    object_bins[~np.isnan(values)])
                workspace.display_data.labels.append(labels)
                workspace.display_data.values.append(values[~np.isnan(values)])
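
The bin-hit computation in isolation, with invented values and thresholds: padding the threshold list with +/-inf lets the lowest and highest bins catch everything, and NaN values fall into no bin.

import numpy as np

values = np.array([0.1, 0.5, 0.9, np.nan])
thresholds = np.hstack(([-np.inf], [0.25, 0.75], [np.inf]))
ob_idx, th_idx = np.mgrid[0:len(values), 0:len(thresholds) - 1]
bin_hits = (values[ob_idx] > thresholds[th_idx]) & (
    values[ob_idx] <= thresholds[th_idx + 1])
# Rows: one per value; columns: one per bin. The NaN row is all zeros.
print(bin_hits.astype(int))
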
Example #20
    def run_two_measurements(self, workspace):
        measurements = workspace.measurements
        in_high_class = []
        saved_values = []
        objects = workspace.object_set.get_objects(self.object_name.value)
        has_nan_measurement = np.zeros(objects.count, bool)
        for feature, threshold_method, threshold in (
            (self.first_measurement, self.first_threshold_method,
             self.first_threshold),
            (
                self.second_measurement,
                self.second_threshold_method,
                self.second_threshold,
            ),
        ):
            values = measurements.get_current_measurement(
                self.object_name.value, feature.value)
            if len(values) < objects.count:
                values = np.hstack(
                    (values, [np.nan] * (objects.count - len(values))))
            saved_values.append(values)
            has_nan_measurement = has_nan_measurement | np.isnan(values)
            if threshold_method == TM_CUSTOM:
                t = threshold.value
            elif len(values) == 0:
                t = 0
            elif threshold_method == TM_MEAN:
                t = np.mean(values[~np.isnan(values)])
            elif threshold_method == TM_MEDIAN:
                t = np.median(values[~np.isnan(values)])
            else:
                raise ValueError("Unknown threshold method: %s" %
                                 threshold_method.value)
            in_high_class.append(values >= t)
        feature_names = self.get_feature_name_matrix()
        num_values = len(values)
        for i in range(2):
            for j in range(2):
                in_class = ((in_high_class[0].astype(int) == i)
                            & (in_high_class[1].astype(int) == j)
                            & (~has_nan_measurement))
                measurements.add_measurement(
                    self.object_name.value,
                    "_".join((M_CATEGORY, feature_names[i, j])),
                    in_class.astype(int),
                )
                num_hits = in_class.sum()
                measurement_name = "_".join(
                    (M_CATEGORY, feature_names[i, j], F_NUM_PER_BIN))
                measurements.add_measurement(cpmeas.IMAGE, measurement_name,
                                             num_hits)
                measurement_name = "_".join(
                    (M_CATEGORY, feature_names[i, j], F_PCT_PER_BIN))
                measurements.add_measurement(
                    cpmeas.IMAGE,
                    measurement_name,
                    100.0 * float(num_hits) /
                    num_values if num_values > 0 else 0,
                )

        if self.wants_image:
            class_1, class_2 = in_high_class
            object_codes = class_1.astype(int) + class_2.astype(int) * 2 + 1
            object_codes = np.hstack(([0], object_codes))
            object_codes[np.hstack((False, np.isnan(values)))] = 0
            nobjects = len(class_1)
            mapping = np.zeros(nobjects + 1, int)
            mapping[1:] = np.arange(1, nobjects + 1)
            labels = object_codes[mapping[objects.segmented]]
            colors = self.get_colors(4)
            image = colors[labels, :3]
            image = cpi.Image(image, parent_image=objects.parent_image)
            workspace.image_set.add(self.image_name.value, image)

        if self.show_window:
            workspace.display_data.in_high_class = in_high_class
            workspace.display_data.labels = (objects.segmented, )
            workspace.display_data.saved_values = saved_values
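
The 2x2 classification in miniature, with invented feature values: each object is scored high/low on two features against per-feature thresholds (the mean here, as in TM_MEAN), yielding four class codes.

import numpy as np

feat1 = np.array([1.0, 5.0, 2.0, 8.0])
feat2 = np.array([0.2, 0.9, 0.8, 0.1])
high1 = feat1 >= np.mean(feat1)   # TM_MEAN-style threshold
high2 = feat2 >= np.mean(feat2)
class_code = high1.astype(int) + 2 * high2.astype(int)
print(class_code)  # [0 3 2 1]: one of four classes per object
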
Example #21
    def run(self, workspace):
        #
        # Get the input and output image names. You need to get the .value
        # because otherwise you'll get the setting object instead of
        # the string name.
        #
        input_image_name = self.input_image_name.value
        output_image_name = self.output_image_name.value
        #
        # Get the image set. The image set has all of the images in it.
        # The assert statement makes sure that it really is an image set,
        # but, more importantly, it lets my editor do context-sensitive
        # completion for the image set.
        #
        image_set = workspace.image_set
        # assert isinstance(image_set, cpi.ImageSet)
        #
        # Get the input image object. We want a grayscale image here.
        # The image set will convert a color image to a grayscale one
        # and warn the user.
        #
        input_image = image_set.get_image(input_image_name,
                                          must_be_grayscale=True)
        #
        # Get the pixels - these are a 2-d Numpy array.
        #
        pixels = input_image.pixel_data

        #
        # Get the mask, or default to an all-True mask.
        #
        if input_image.has_mask:
            mask = input_image.mask
        else:
            mask = np.ones(pixels.shape, bool)

        if self.transform_choice == M_FOURIER:
            output_pixels = fourier_transform(pixels, mask)
        elif self.transform_choice == M_TEST_FOURIER:
            output_pixels = check_fourier_transform(pixels, mask)
        elif self.transform_choice == M_CHEBYSHEV_T:
            M = self.M.value
            output_pixels = chebyshev_transform(pixels, M, mask)
        elif self.transform_choice in (M_SIMONCELLI_P, M_SIMONCELLI_R,
                                       M_TEST_SIMONCELLI_P,
                                       M_TEST_SIMONCELLI_R, M_HAAR_S,
                                       M_HAAR_T, M_TEST_HAAR):
            scale = self.scale.value
            nx = len(pixels[0])
            ny = len(pixels)
            scale_max = np.log(np.min([nx, ny])) / np.log(2.0)
            if scale > scale_max:
                print("Maximum number of scales exceeded.")
                scale = int(scale_max)
            if self.transform_choice == M_SIMONCELLI_P:
                temp_output_pixels = simoncelli_transform_pyramid(
                    pixels, scale, mask)
                sizex = nx
                sizey = ((2 ** (scale + 1) - 1) * ny) // (2 ** scale)
                output_pixels = np.zeros([sizex, sizey])
                for s in range(0, scale + 1):
                    # Band s spans columns (2**s - 1) * ny / 2**(s - 1) to
                    # (2**(s + 1) - 1) * ny / 2**s; integer arithmetic keeps
                    # the slice bounds usable as indices under Python 3.
                    start = ((2 ** s - 1) * ny * 2) // (2 ** s)
                    stop = ((2 ** (s + 1) - 1) * ny) // (2 ** s)
                    output_pixels[0:nx, start:stop] = \
                        temp_output_pixels[s, 0:nx, 0:ny // (2 ** s)]
            elif self.transform_choice == M_SIMONCELLI_R:
                temp_output_pixels = simoncelli_transform_redundant(
                    pixels, scale, mask)
                sizex = nx
                sizey = (scale + 1) * ny
                output_pixels = np.zeros([sizex, sizey])
                for s in range(0, scale + 1):
                    output_pixels[0:nx, s * ny:(s + 1) *
                                  ny] = temp_output_pixels[s, :, :]
            elif self.transform_choice == M_TEST_SIMONCELLI_P:
                output_pixels = check_simoncelli_transform_pyramid(
                    pixels, scale, mask)
            elif self.transform_choice == M_HAAR_T:
                output_pixels = haar_transform(pixels, scale, mask)
            elif self.transform_choice == M_HAAR_S:
                output_pixels = inverse_haar_transform(pixels, scale, mask)
            elif self.transform_choice == M_TEST_HAAR:
                output_pixels = check_haar_transform(pixels, scale, mask)
            else:
                output_pixels = check_simoncelli_transform_redundant(
                    pixels, scale, mask)
        else:
            raise NotImplementedError("Unimplemented transform: %s" %
                                      self.method.value)

        #
        # Make an image object. It's nice if you tell CellProfiler
        # about the parent image - the child inherits the parent's
        # cropping and masking, but it's not absolutely necessary
        #
        output_image = cpi.Image(output_pixels, parent_image=input_image)
        image_set.add(output_image_name, output_image)

        #
        # Save intermediate results for display if the window frame is on
        #
        if workspace.frame is not None:
            workspace.display_data.input_pixels = pixels
            workspace.display_data.output_pixels = output_pixels
Example #22
    def provide_image(self, image_set):
        """load an image plane from an omero server
        and return a 2-d grayscale image
        """
        # TODO: return 3d RGB images when c == None like loadimage.py does?

        if self.__is_cached:
            return self.__omero_image_plane

        gateway = self.__gateway
        pixels_id = self.__pixels_id
        z = self.__z
        c = self.__c
        t = self.__t

        # Retrieve the image data from the omero server
        pixels = self.__pixels
        omero_image_plane = gateway.getPlane(pixels_id, z, c, t)

        # Create a 'cellprofiler' image
        width = pixels.getSizeX().getValue()
        height = pixels.getSizeY().getValue()
        pixels_type = pixels.getPixelsType().getValue().getValue()

        # OMERO stores images in big endian format
        little_endian = False
        if pixels_type == INT_8:
            dtype = np.int8
            scale = 255
        elif pixels_type == UINT_8:
            dtype = np.uint8
            scale = 255
        elif pixels_type == UINT_16:
            dtype = "<u2" if little_endian else ">u2"
            scale = 65535
        elif pixels_type == INT_16:
            dtype = "<i2" if little_endian else ">i2"
            scale = 65535
        elif pixels_type == UINT_32:
            dtype = "<u4" if little_endian else ">u4"
            scale = 2**32
        elif pixels_type == INT_32:
            dtype = "<i4" if little_endian else ">i4"
            scale = 2**32 - 1
        elif pixels_type == FLOAT:
            dtype = "<f4" if little_endian else ">f4"
            scale = 1
        elif pixels_type == DOUBLE:
            dtype = "<f8" if little_endian else ">f8"
            scale = 1
        else:
            raise NotImplementedError(
                "omero pixels type not implemented for %s" % pixels_type)
        # TODO: should something be done here with MaxSampleValue (like loadimages.py does)?

        image = np.frombuffer(omero_image_plane, dtype)
        image.shape = (height, width)
        image = image.astype(np.float32) / float(scale)
        image = cpimage.Image(image)
        self.__cpimage_data = image
        self.__is_cached = True
        return image
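
A sketch of the final normalization step, assuming a big-endian uint16 plane such as OMERO would deliver: reinterpret the bytes, reshape to (height, width), and rescale to float32 in [0, 1]. The raw buffer here is a stand-in for gateway.getPlane().

import numpy as np

height, width, scale = 2, 3, 65535
raw = np.arange(6, dtype=">u2").tobytes()   # stand-in for the plane buffer
image = np.frombuffer(raw, ">u2").reshape(height, width)
image = image.astype(np.float32) / float(scale)
assert image.dtype == np.float32 and image.max() <= 1.0
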
Example #23
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)
        dimensions = len(objects.shape)
        assert isinstance(objects, cpo.Objects)
        has_pixels = objects.areas > 0
        labels = objects.small_removed_segmented
        kept_labels = objects.segmented
        neighbor_objects = workspace.object_set.get_objects(
            self.neighbors_name.value)
        neighbor_labels = neighbor_objects.small_removed_segmented
        neighbor_kept_labels = neighbor_objects.segmented
        assert isinstance(neighbor_objects, cpo.Objects)
        if not self.wants_excluded_objects.value:
            # Remove labels not present in kept segmentation while preserving object IDs.
            mask = neighbor_kept_labels > 0
            neighbor_labels[~mask] = 0
        nobjects = np.max(labels)
        nkept_objects = len(objects.indices)
        nneighbors = np.max(neighbor_labels)

        _, object_numbers = objects.relate_labels(labels, kept_labels)
        if self.neighbors_are_objects:
            neighbor_numbers = object_numbers
            neighbor_has_pixels = has_pixels
        else:
            _, neighbor_numbers = neighbor_objects.relate_labels(
                neighbor_labels, neighbor_objects.small_removed_segmented)
            neighbor_has_pixels = np.bincount(neighbor_labels.ravel())[1:] > 0
        neighbor_count = np.zeros((nobjects, ))
        pixel_count = np.zeros((nobjects, ))
        first_object_number = np.zeros((nobjects, ), int)
        second_object_number = np.zeros((nobjects, ), int)
        first_x_vector = np.zeros((nobjects, ))
        second_x_vector = np.zeros((nobjects, ))
        first_y_vector = np.zeros((nobjects, ))
        second_y_vector = np.zeros((nobjects, ))
        angle = np.zeros((nobjects, ))
        percent_touching = np.zeros((nobjects, ))
        expanded_labels = None
        if self.distance_method == D_EXPAND:
            # Find the i,j coordinates of the nearest foreground point
            # to every background point
            if dimensions == 2:
                i, j = scind.distance_transform_edt(labels == 0,
                                                    return_distances=False,
                                                    return_indices=True)
                # Assign each background pixel to the label of its nearest
                # foreground pixel. Assign label to label for foreground.
                labels = labels[i, j]
            else:
                k, i, j = scind.distance_transform_edt(labels == 0,
                                                       return_distances=False,
                                                       return_indices=True)
                labels = labels[k, i, j]
            expanded_labels = labels  # for display
            distance = 1  # dilate once to make touching edges overlap
            scale = S_EXPANDED
            if self.neighbors_are_objects:
                neighbor_labels = labels.copy()
        elif self.distance_method == D_WITHIN:
            distance = self.distance.value
            scale = str(distance)
        elif self.distance_method == D_ADJACENT:
            distance = 1
            scale = S_ADJACENT
        else:
            raise ValueError("Unknown distance method: %s" %
                             self.distance_method.value)
        if nneighbors > (1 if self.neighbors_are_objects else 0):
            first_objects = []
            second_objects = []
            object_indexes = np.arange(nobjects, dtype=np.int32) + 1
            #
            # First, compute the first and second nearest neighbors,
            # and the angles between self and the first and second
            # nearest neighbors
            #
            ocenters = centers_of_labels(
                objects.small_removed_segmented).transpose()
            ncenters = centers_of_labels(
                neighbor_objects.small_removed_segmented).transpose()
            areas = fix(
                scind.sum(np.ones(labels.shape), labels, object_indexes))
            perimeter_outlines = outline(labels)
            perimeters = fix(
                scind.sum(np.ones(labels.shape), perimeter_outlines,
                          object_indexes))

            i, j = np.mgrid[0:nobjects, 0:nneighbors]
            distance_matrix = np.sqrt((ocenters[i, 0] - ncenters[j, 0])**2 +
                                      (ocenters[i, 1] - ncenters[j, 1])**2)
            #
            # order[:,0] should be arange(nobjects)
            # order[:,1] should be the nearest neighbor
            # order[:,2] should be the next nearest neighbor
            #
            if distance_matrix.shape[1] == 1:
                # a little buggy, lexsort assumes that a 2-d array of
                # second dimension = 1 is a 1-d array
                order = np.zeros(distance_matrix.shape, int)
            else:
                order = np.lexsort([distance_matrix])
            first_neighbor = 1 if self.neighbors_are_objects else 0
            first_object_index = order[:, first_neighbor]
            first_x_vector = ncenters[first_object_index, 1] - ocenters[:, 1]
            first_y_vector = ncenters[first_object_index, 0] - ocenters[:, 0]
            if nneighbors > first_neighbor + 1:
                second_object_index = order[:, first_neighbor + 1]
                second_x_vector = ncenters[second_object_index,
                                           1] - ocenters[:, 1]
                second_y_vector = ncenters[second_object_index,
                                           0] - ocenters[:, 0]
                v1 = np.array((first_x_vector, first_y_vector))
                v2 = np.array((second_x_vector, second_y_vector))
                #
                # Project the unit vector v1 against the unit vector v2
                #
                dot = np.sum(v1 * v2, 0) / np.sqrt(
                    np.sum(v1**2, 0) * np.sum(v2**2, 0))
                angle = np.arccos(dot) * 180.0 / np.pi

            # Make the structuring element for dilation
            if dimensions == 2:
                strel = strel_disk(distance)
            else:
                strel = skimage.morphology.ball(distance)
            #
            # A little bigger one to enter into the border with a structure
            # that mimics the one used to create the outline
            #
            if dimensions == 2:
                strel_touching = strel_disk(distance + 0.5)
            else:
                strel_touching = skimage.morphology.ball(distance + 0.5)
            #
            # Get the extents for each object and calculate the patch
            # that excises the part of the image that is "distance"
            # away
            if dimensions == 2:
                i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1]]

                minimums_i, maximums_i, _, _ = scind.extrema(
                    i, labels, object_indexes)
                minimums_j, maximums_j, _, _ = scind.extrema(
                    j, labels, object_indexes)

                minimums_i = np.maximum(fix(minimums_i) - distance,
                                        0).astype(int)
                maximums_i = np.minimum(
                    fix(maximums_i) + distance + 1,
                    labels.shape[0]).astype(int)
                minimums_j = np.maximum(fix(minimums_j) - distance,
                                        0).astype(int)
                maximums_j = np.minimum(
                    fix(maximums_j) + distance + 1,
                    labels.shape[1]).astype(int)
            else:
                k, i, j = np.mgrid[0:labels.shape[0], 0:labels.shape[1],
                                   0:labels.shape[2]]

                minimums_k, maximums_k, _, _ = scind.extrema(
                    k, labels, object_indexes)
                minimums_i, maximums_i, _, _ = scind.extrema(
                    i, labels, object_indexes)
                minimums_j, maximums_j, _, _ = scind.extrema(
                    j, labels, object_indexes)

                minimums_k = np.maximum(fix(minimums_k) - distance,
                                        0).astype(int)
                maximums_k = np.minimum(
                    fix(maximums_k) + distance + 1,
                    labels.shape[0]).astype(int)
                minimums_i = np.maximum(fix(minimums_i) - distance,
                                        0).astype(int)
                maximums_i = np.minimum(
                    fix(maximums_i) + distance + 1,
                    labels.shape[1]).astype(int)
                minimums_j = np.maximum(fix(minimums_j) - distance,
                                        0).astype(int)
                maximums_j = np.minimum(
                    fix(maximums_j) + distance + 1,
                    labels.shape[2]).astype(int)
            #
            # Loop over all objects
            # Calculate which ones overlap "index"
            # Calculate how much overlap there is of others to "index"
            #
            for object_number in object_numbers:
                if object_number == 0:
                    #
                    # No corresponding object in small-removed. This means
                    # that the object has no pixels, e.g., not renumbered.
                    #
                    continue
                index = object_number - 1
                if dimensions == 2:
                    patch = labels[minimums_i[index]:maximums_i[index],
                                   minimums_j[index]:maximums_j[index], ]
                    npatch = neighbor_labels[
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ]
                else:
                    patch = labels[minimums_k[index]:maximums_k[index],
                                   minimums_i[index]:maximums_i[index],
                                   minimums_j[index]:maximums_j[index], ]
                    npatch = neighbor_labels[
                        minimums_k[index]:maximums_k[index],
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ]

                #
                # Find the neighbors
                #
                patch_mask = patch == (index + 1)
                extended = scind.binary_dilation(patch_mask, strel)
                neighbors = np.unique(npatch[extended])
                neighbors = neighbors[neighbors != 0]
                if self.neighbors_are_objects:
                    neighbors = neighbors[neighbors != object_number]
                nc = len(neighbors)
                neighbor_count[index] = nc
                if nc > 0:
                    first_objects.append(np.ones(nc, int) * object_number)
                    second_objects.append(neighbors)
                #
                # Find the # of overlapping pixels. Dilate the neighbors
                # and see how many pixels overlap our image. Use a 3x3
                # structuring element to expand the overlapping edge
                # into the perimeter.
                #
                if dimensions == 2:
                    outline_patch = (perimeter_outlines[
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ] == object_number
                                     )
                else:
                    outline_patch = (perimeter_outlines[
                        minimums_k[index]:maximums_k[index],
                        minimums_i[index]:maximums_i[index],
                        minimums_j[index]:maximums_j[index], ] == object_number
                                     )
                if self.neighbors_are_objects:
                    extended = scind.binary_dilation(
                        (patch != 0) & (patch != object_number),
                        strel_touching)
                else:
                    extended = scind.binary_dilation((npatch != 0),
                                                     strel_touching)
                overlap = np.sum(outline_patch & extended)
                pixel_count[index] = overlap
            if sum([len(x) for x in first_objects]) > 0:
                first_objects = np.hstack(first_objects)
                reverse_object_numbers = np.zeros(
                    max(np.max(object_numbers), np.max(first_objects)) + 1,
                    int)
                reverse_object_numbers[object_numbers] = (
                    np.arange(len(object_numbers)) + 1)
                first_objects = reverse_object_numbers[first_objects]

                second_objects = np.hstack(second_objects)
                reverse_neighbor_numbers = np.zeros(
                    max(np.max(neighbor_numbers), np.max(second_objects)) + 1,
                    int)
                reverse_neighbor_numbers[neighbor_numbers] = (
                    np.arange(len(neighbor_numbers)) + 1)
                second_objects = reverse_neighbor_numbers[second_objects]
                to_keep = (first_objects > 0) & (second_objects > 0)
                first_objects = first_objects[to_keep]
                second_objects = second_objects[to_keep]
            else:
                first_objects = np.zeros(0, int)
                second_objects = np.zeros(0, int)
            percent_touching = pixel_count * 100 / perimeters
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            #
            # Have to recompute nearest
            #
            first_object_number = np.zeros(nkept_objects, int)
            second_object_number = np.zeros(nkept_objects, int)
            if nkept_objects > (1 if self.neighbors_are_objects else 0):
                di = (ocenters[object_indexes[:, np.newaxis], 0] -
                      ncenters[neighbor_indexes[np.newaxis, :], 0])
                dj = (ocenters[object_indexes[:, np.newaxis], 1] -
                      ncenters[neighbor_indexes[np.newaxis, :], 1])
                distance_matrix = np.sqrt(di * di + dj * dj)
                distance_matrix[~has_pixels, :] = np.inf
                distance_matrix[:, ~neighbor_has_pixels] = np.inf
                #
                # order[:,0] should be arange(nobjects)
                # order[:,1] should be the nearest neighbor
                # order[:,2] should be the next nearest neighbor
                #
                order = np.lexsort([distance_matrix
                                    ]).astype(first_object_number.dtype)
                if self.neighbors_are_objects:
                    first_object_number[has_pixels] = order[has_pixels, 1] + 1
                    if nkept_objects > 2:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 2] + 1
                else:
                    first_object_number[has_pixels] = order[has_pixels, 0] + 1
                    if order.shape[1] > 1:
                        second_object_number[has_pixels] = order[has_pixels,
                                                                 1] + 1
        else:
            object_indexes = object_numbers - 1
            neighbor_indexes = neighbor_numbers - 1
            first_objects = np.zeros(0, int)
            second_objects = np.zeros(0, int)
        #
        # Now convert all measurements from the small-removed to
        # the final number set.
        #
        neighbor_count = neighbor_count[object_indexes]
        neighbor_count[~has_pixels] = 0
        percent_touching = percent_touching[object_indexes]
        percent_touching[~has_pixels] = 0
        first_x_vector = first_x_vector[object_indexes]
        second_x_vector = second_x_vector[object_indexes]
        first_y_vector = first_y_vector[object_indexes]
        second_y_vector = second_y_vector[object_indexes]
        angle = angle[object_indexes]
        #
        # Record the measurements
        #
        assert isinstance(workspace, cpw.Workspace)
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        image_set = workspace.image_set
        features_and_data = [
            (M_NUMBER_OF_NEIGHBORS, neighbor_count),
            (M_FIRST_CLOSEST_OBJECT_NUMBER, first_object_number),
            (
                M_FIRST_CLOSEST_DISTANCE,
                np.sqrt(first_x_vector**2 + first_y_vector**2),
            ),
            (M_SECOND_CLOSEST_OBJECT_NUMBER, second_object_number),
            (
                M_SECOND_CLOSEST_DISTANCE,
                np.sqrt(second_x_vector**2 + second_y_vector**2),
            ),
            (M_ANGLE_BETWEEN_NEIGHBORS, angle),
            (M_PERCENT_TOUCHING, percent_touching),
        ]
        for feature_name, data in features_and_data:
            m.add_measurement(self.object_name.value,
                              self.get_measurement_name(feature_name), data)
        if len(first_objects) > 0:
            m.add_relate_measurement(
                self.module_num,
                cpmeas.NEIGHBORS,
                self.object_name.value,
                self.object_name.value
                if self.neighbors_are_objects else self.neighbors_name.value,
                m.image_set_number * np.ones(first_objects.shape, int),
                first_objects,
                m.image_set_number * np.ones(second_objects.shape, int),
                second_objects,
            )

        labels = kept_labels

        neighbor_count_image = np.zeros(labels.shape, int)
        object_mask = objects.segmented != 0
        object_indexes = objects.segmented[object_mask] - 1
        neighbor_count_image[object_mask] = neighbor_count[object_indexes]
        workspace.display_data.neighbor_count_image = neighbor_count_image

        percent_touching_image = np.zeros(labels.shape)
        percent_touching_image[object_mask] = percent_touching[object_indexes]
        workspace.display_data.percent_touching_image = percent_touching_image

        image_set = workspace.image_set
        if self.wants_count_image.value:
            neighbor_cm_name = self.count_colormap.value
            neighbor_cm = get_colormap(neighbor_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=neighbor_cm)
            img = sm.to_rgba(neighbor_count_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            count_image = cpi.Image(img, masking_objects=objects)
            image_set.add(self.count_image_name.value, count_image)
        else:
            neighbor_cm_name = cpprefs.get_default_colormap()
            neighbor_cm = matplotlib.cm.get_cmap(neighbor_cm_name)
        if self.wants_percent_touching_image:
            percent_touching_cm_name = self.touching_colormap.value
            percent_touching_cm = get_colormap(percent_touching_cm_name)
            sm = matplotlib.cm.ScalarMappable(cmap=percent_touching_cm)
            img = sm.to_rgba(percent_touching_image)[:, :, :3]
            img[:, :, 0][~object_mask] = 0
            img[:, :, 1][~object_mask] = 0
            img[:, :, 2][~object_mask] = 0
            touching_image = cpi.Image(img, masking_objects=objects)
            image_set.add(self.touching_image_name.value, touching_image)
        else:
            percent_touching_cm_name = cpprefs.get_default_colormap()
            percent_touching_cm = matplotlib.cm.get_cmap(
                percent_touching_cm_name)

        if self.show_window:
            workspace.display_data.neighbor_cm_name = neighbor_cm_name
            workspace.display_data.percent_touching_cm_name = percent_touching_cm_name
            workspace.display_data.orig_labels = objects.segmented
            workspace.display_data.neighbor_labels = neighbor_labels
            workspace.display_data.expanded_labels = expanded_labels
            workspace.display_data.object_mask = object_mask
            workspace.display_data.dimensions = dimensions
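
The display images above are built with a label-indexing idiom: a per-object vector is splatted into an image by using the label matrix (minus one) as an index. A minimal standalone sketch of that idiom, with made-up labels and values rather than CellProfiler objects:

import numpy as np

# Hypothetical labeled segmentation: 0 = background, 1..N = objects
labels = np.array([[0, 1, 1],
                   [2, 2, 0],
                   [0, 3, 3]])

# One value per object, indexed by label - 1 (e.g. neighbor counts)
values = np.array([5, 8, 2])

value_image = np.zeros(labels.shape, values.dtype)
object_mask = labels != 0
# labels[object_mask] - 1 maps each foreground pixel to its object's row
value_image[object_mask] = values[labels[object_mask] - 1]

print(value_image)
# [[0 5 5]
#  [8 8 0]
#  [0 2 2]]
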
Example #24
    def run(self, workspace):
        image = workspace.image_set.get_image(self.image_name.value,
                                              must_be_grayscale=True)
        orig_pixels = image.pixel_data
        if image.has_mask:
            mask = image.mask
        else:
            mask = np.ones(orig_pixels.shape, bool)
        if self.method == M_SOBEL:
            if self.direction == E_ALL:
                output_pixels = sobel(orig_pixels, mask)
            elif self.direction == E_HORIZONTAL:
                output_pixels = hsobel(orig_pixels, mask)
            elif self.direction == E_VERTICAL:
                output_pixels = vsobel(orig_pixels, mask)
            else:
                raise NotImplementedError(
                    "Unimplemented direction for Sobel: %s" %
                    self.direction.value)
        elif self.method == M_LOG:
            sigma = self.get_sigma()
            size = int(sigma * 4) + 1
            output_pixels = laplacian_of_gaussian(orig_pixels, mask, size,
                                                  sigma)
        elif self.method == M_PREWITT:
            if self.direction == E_ALL:
                output_pixels = prewitt(orig_pixels)
            elif self.direction == E_HORIZONTAL:
                output_pixels = hprewitt(orig_pixels, mask)
            elif self.direction == E_VERTICAL:
                output_pixels = vprewitt(orig_pixels, mask)
            else:
                raise NotImplementedError(
                    "Unimplemented direction for Prewitt: %s" %
                    self.direction.value)
        elif self.method == M_CANNY:
            high_threshold = self.manual_threshold.value
            low_threshold = self.low_threshold.value
            if (self.wants_automatic_low_threshold.value
                    or self.wants_automatic_threshold.value):
                sobel_image = sobel(orig_pixels, mask)
                low, high = otsu3(sobel_image[mask])
                if self.wants_automatic_low_threshold.value:
                    low_threshold = low * self.threshold_adjustment_factor.value
                if self.wants_automatic_threshold.value:
                    high_threshold = high * self.threshold_adjustment_factor.value
            output_pixels = canny(orig_pixels, mask, self.get_sigma(),
                                  low_threshold, high_threshold)
        elif self.method == M_ROBERTS:
            output_pixels = roberts(orig_pixels, mask)
        elif self.method == M_KIRSCH:
            output_pixels = kirsch(orig_pixels)
        else:
            raise NotImplementedError(
                "Unimplemented edge detection method: %s" % self.method.value)

        output_image = cpi.Image(output_pixels, parent_image=image)
        workspace.image_set.add(self.output_image_name.value, output_image)

        if self.show_window:
            workspace.display_data.orig_pixels = orig_pixels
            workspace.display_data.output_pixels = output_pixels
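
In the Canny branch above, the two hysteresis thresholds are derived from a three-class Otsu split of the Sobel magnitudes (otsu3) and then scaled by the adjustment factor. A rough standalone sketch of the same idea, assuming scikit-image in place of the module's centrosome helpers and ignoring mask support:

import numpy as np
from skimage.filters import sobel, threshold_multiotsu
from skimage.feature import canny

rng = np.random.default_rng(0)
image = rng.random((64, 64))  # stand-in grayscale image in [0, 1]

# Three-class Otsu on the gradient magnitudes yields two cut points,
# roughly analogous to the module's otsu3(sobel_image[mask])
grad = sobel(image)
low, high = threshold_multiotsu(grad, classes=3)

adjustment = 1.0  # analogous to threshold_adjustment_factor
edges = canny(image,
              sigma=1.0,
              low_threshold=low * adjustment,
              high_threshold=high * adjustment)
print(edges.shape, edges.dtype)
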
    def run(self, workspace):
        """Run the module on the image set"""
        seed_objects_name = self.seed_objects_name.value
        skeleton_name = self.image_name.value
        seed_objects = workspace.object_set.get_objects(seed_objects_name)
        labels = seed_objects.segmented
        labels_count = np.max(labels)
        label_range = np.arange(labels_count, dtype=np.int32) + 1

        skeleton_image = workspace.image_set.get_image(skeleton_name,
                                                       must_be_binary=True)
        skeleton = skeleton_image.pixel_data
        if skeleton_image.has_mask:
            skeleton = skeleton & skeleton_image.mask
        try:
            labels = skeleton_image.crop_image_similarly(labels)
        except ValueError:
            labels, m1 = cpo.size_similarly(skeleton, labels)
            labels[~m1] = 0
        #
        # The following code makes a ring around the seed objects with
        # the skeleton trunks sticking out of it.
        #
        # Create a new skeleton with holes at the seed objects
        # First combine the seed objects with the skeleton so
        # that the skeleton trunks come out of the seed objects.
        #
        # Erode the labels once so that all of the trunk branchpoints
        # will be within the labels
        #
        # Dilate the objects, then subtract them to make a ring
        #
        my_disk = morph.strel_disk(1.5).astype(int)
        dilated_labels = grey_dilation(labels, footprint=my_disk)
        seed_mask = dilated_labels > 0
        combined_skel = skeleton | seed_mask

        closed_labels = grey_erosion(dilated_labels, footprint=my_disk)
        seed_center = closed_labels > 0
        combined_skel = combined_skel & (~seed_center)
        #
        # Fill in single holes (but not a one-pixel hole made by
        # a one-pixel image)
        #
        if self.wants_to_fill_holes:

            def size_fn(area, is_object):
                return (not is_object) and (area <= self.maximum_hole_size.value)

            combined_skel = morph.fill_labeled_holes(combined_skel,
                                                     ~seed_center, size_fn)
        #
        # Reskeletonize to make true branchpoints at the ring boundaries
        #
        combined_skel = morph.skeletonize(combined_skel)
        #
        # The skeleton outside of the labels
        #
        outside_skel = combined_skel & (dilated_labels == 0)
        #
        # Associate all skeleton points with seed objects
        #
        dlabels, distance_map = propagate.propagate(np.zeros(labels.shape),
                                                    dilated_labels,
                                                    combined_skel, 1)
        #
        # Get rid of any branchpoints not connected to seeds
        #
        combined_skel[dlabels == 0] = False
        #
        # Find the branchpoints
        #
        branch_points = morph.branchpoints(combined_skel)
        #
        # Odd case: when four branches meet like this, branchpoints are not
        # assigned because they are arbitrary. So assign them.
        #
        # .  .
        #  B.
        #  .B
        # .  .
        #
        odd_case = (combined_skel[:-1, :-1]
                    & combined_skel[1:, :-1]
                    & combined_skel[:-1, 1:]
                    & combined_skel[1:, 1:])
        branch_points[:-1, :-1][odd_case] = True
        branch_points[1:, 1:][odd_case] = True
        #
        # Find the branching counts for the trunks (# of extra branches
        # emanating from a point other than the line it might be on).
        #
        branching_counts = morph.branchings(combined_skel)
        branching_counts = np.array([0, 0, 0, 1, 2])[branching_counts]
        #
        # Only take branches within 1 of the outside skeleton
        #
        dilated_skel = scind.binary_dilation(outside_skel, morph.eight_connect)
        branching_counts[~dilated_skel] = 0
        #
        # Find the endpoints
        #
        end_points = morph.endpoints(combined_skel)
        #
        # We use two ranges for classification here:
        # * anything within one pixel of the dilated image is a trunk
        # * anything outside of that range is a branch
        #
        nearby_labels = dlabels.copy()
        nearby_labels[distance_map > 1.5] = 0

        outside_labels = dlabels.copy()
        outside_labels[nearby_labels > 0] = 0
        #
        # The trunks are the branchpoints that lie within one pixel of
        # the dilated image.
        #
        if labels_count > 0:
            trunk_counts = fix(
                scind.sum(branching_counts, nearby_labels,
                          label_range)).astype(int)
        else:
            trunk_counts = np.zeros((0, ), int)
        #
        # The branches are the branchpoints that lie outside the seed objects
        #
        if labels_count > 0:
            branch_counts = fix(
                scind.sum(branch_points, outside_labels, label_range))
        else:
            branch_counts = np.zeros((0, ), int)
        #
        # Save the endpoints
        #
        if labels_count > 0:
            end_counts = fix(scind.sum(end_points, outside_labels,
                                       label_range))
        else:
            end_counts = np.zeros((0, ), int)
        #
        # Calculate the distances
        #
        total_distance = morph.skeleton_length(dlabels * outside_skel,
                                               label_range)
        #
        # Save measurements
        #
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        feature = "_".join((C_OBJSKELETON, F_NUMBER_TRUNKS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, trunk_counts)
        feature = "_".join(
            (C_OBJSKELETON, F_NUMBER_NON_TRUNK_BRANCHES, skeleton_name))
        m.add_measurement(seed_objects_name, feature, branch_counts)
        feature = "_".join(
            (C_OBJSKELETON, F_NUMBER_BRANCH_ENDS, skeleton_name))
        m.add_measurement(seed_objects_name, feature, end_counts)
        feature = "_".join(
            (C_OBJSKELETON, F_TOTAL_OBJSKELETON_LENGTH, skeleton_name))
        m[seed_objects_name, feature] = total_distance
        #
        # Collect the graph information
        #
        if self.wants_objskeleton_graph:
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            intensity_image = workspace.image_set.get_image(
                self.intensity_image_name.value)
            edge_graph, vertex_graph = self.make_objskeleton_graph(
                combined_skel,
                dlabels,
                trunk_mask,
                branch_points & ~trunk_mask,
                end_points,
                intensity_image.pixel_data,
            )

            image_number = workspace.measurements.image_set_number

            edge_path, vertex_path = self.get_graph_file_paths(
                m, image_number)
            workspace.interaction_request(
                self,
                image_number,
                edge_path,
                edge_graph,
                vertex_path,
                vertex_graph,
                headless_ok=True,
            )

            if self.show_window:
                workspace.display_data.edge_graph = edge_graph
                workspace.display_data.vertex_graph = vertex_graph
                workspace.display_data.intensity_image = intensity_image.pixel_data
        #
        # Make the display image
        #
        if self.show_window or self.wants_branchpoint_image:
            branchpoint_image = np.zeros(
                (skeleton.shape[0], skeleton.shape[1], 3))
            trunk_mask = (branching_counts > 0) & (nearby_labels != 0)
            branch_mask = branch_points & (outside_labels != 0)
            end_mask = end_points & (outside_labels != 0)
            branchpoint_image[outside_skel, :] = 1
            branchpoint_image[trunk_mask | branch_mask | end_mask, :] = 0
            branchpoint_image[trunk_mask, 0] = 1
            branchpoint_image[branch_mask, 1] = 1
            branchpoint_image[end_mask, 2] = 1
            branchpoint_image[dilated_labels != 0, :] *= 0.875
            branchpoint_image[dilated_labels != 0, :] += 0.1
            if self.show_window:
                workspace.display_data.branchpoint_image = branchpoint_image
            if self.wants_branchpoint_image:
                bi = cpi.Image(branchpoint_image, parent_image=skeleton_image)
                workspace.image_set.add(self.branchpoint_image_name.value, bi)
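
The trunk/branch/endpoint bookkeeping above leans on centrosome's morphology helpers. As an illustrative sketch only (not the module's actual helpers), a skeleton pixel's role can be classified by counting its 8-connected neighbors, and per-object totals taken with scipy.ndimage.sum, which is what scind.sum resolves to assuming the usual CellProfiler import alias:

import numpy as np
from scipy import ndimage

# Hypothetical binary skeleton: a Y shape
skel = np.zeros((7, 7), bool)
skel[0:4, 3] = True              # stem
skel[4, 2] = skel[5, 1] = True   # left branch
skel[4, 4] = skel[5, 5] = True   # right branch

# Count 8-connected skeleton neighbors at each pixel
kernel = np.ones((3, 3), int)
kernel[1, 1] = 0
neighbors = ndimage.convolve(skel.astype(int), kernel, mode="constant")

end_points = skel & (neighbors == 1)      # one neighbor: an endpoint
branch_points = skel & (neighbors >= 3)   # three or more: a branchpoint

# Per-object totals, as in scind.sum(branch_points, outside_labels, label_range)
labels = np.where(skel, 1, 0)  # pretend everything belongs to object 1
print(ndimage.sum(end_points, labels, [1]),
      ndimage.sum(branch_points, labels, [1]))
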
    def run(self, workspace):
        image_set = workspace.image_set
        shape = None
        if self.input_color_choice == CC_GRAYSCALE:
            if self.wants_red_input.value:
                red_image = image_set.get_image(
                    self.red_input_image.value,
                    must_be_grayscale=True).pixel_data
                shape = red_image.shape
            else:
                red_image = 0
            if self.wants_green_input.value:
                green_image = image_set.get_image(
                    self.green_input_image.value,
                    must_be_grayscale=True).pixel_data
                shape = green_image.shape
            else:
                green_image = 0
            if self.wants_blue_input.value:
                blue_image = image_set.get_image(
                    self.blue_input_image.value,
                    must_be_grayscale=True).pixel_data
                shape = blue_image.shape
            else:
                blue_image = 0
            color_image = np.zeros((shape[0], shape[1], 3))
            color_image[:, :, 0] = red_image
            color_image[:, :, 1] = green_image
            color_image[:, :, 2] = blue_image
            red_image = color_image[:, :, 0]
            green_image = color_image[:, :, 1]
            blue_image = color_image[:, :, 2]
        elif self.input_color_choice == CC_COLOR:
            color_image = image_set.get_image(self.color_input_image.value,
                                              must_be_color=True).pixel_data
            red_image = color_image[:, :, 0]
            green_image = color_image[:, :, 1]
            blue_image = color_image[:, :, 2]
        else:
            raise ValueError("Unimplemented color choice: %s" %
                             self.input_color_choice.value)
        inverted_red = (1 - green_image) * (1 - blue_image)
        inverted_green = (1 - red_image) * (1 - blue_image)
        inverted_blue = (1 - red_image) * (1 - green_image)
        inverted_color = np.dstack(
            (inverted_red, inverted_green, inverted_blue))
        if self.output_color_choice == CC_GRAYSCALE:
            for wants_output, output_image_name, output_image in (
                (self.wants_red_output, self.red_output_image, inverted_red),
                (self.wants_green_output, self.green_output_image,
                 inverted_green),
                (self.wants_blue_output, self.blue_output_image,
                 inverted_blue),
            ):
                if wants_output.value:
                    image = cpi.Image(output_image)
                    image_set.add(output_image_name.value, image)
        elif self.output_color_choice == CC_COLOR:
            image = cpi.Image(inverted_color)
            image_set.add(self.color_output_image.value, image)
        else:
            raise ValueError("Unimplemented color choice: %s" %
                             self.output_color_choice.value)

        if self.show_window:
            workspace.display_data.color_image = color_image
            workspace.display_data.inverted_color = inverted_color
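
The inversion above computes each output channel as the product of the complements of the other two inputs, so white maps to black (and vice versa) while a mid gray of 0.5 maps to 0.25. A tiny numpy check with invented pixel values:

import numpy as np

# Invented 1x3 RGB image: white, black, mid gray (values in [0, 1])
rgb = np.array([[[1.0, 1.0, 1.0],
                 [0.0, 0.0, 0.0],
                 [0.5, 0.5, 0.5]]])
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]

# Each output channel is the product of the other two input complements
inverted = np.dstack(((1 - g) * (1 - b),
                      (1 - r) * (1 - b),
                      (1 - r) * (1 - g)))

print(inverted)
# white -> black, black -> white, 0.5 gray -> 0.25 gray
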
Example #27
    def run(self, workspace):
        image_set = workspace.image_set
        image = image_set.get_image(self.image_name.value)
        pixel_data = image.pixel_data.copy()
        mask = image.mask

        if self.flip_choice != FLIP_NONE:
            if self.flip_choice == FLIP_LEFT_TO_RIGHT:
                i, j = np.mgrid[0:pixel_data.shape[0],
                                pixel_data.shape[1] - 1:-1:-1]
            elif self.flip_choice == FLIP_TOP_TO_BOTTOM:
                i, j = np.mgrid[pixel_data.shape[0] - 1:-1:-1,
                                0:pixel_data.shape[1]]
            elif self.flip_choice == FLIP_BOTH:
                i, j = np.mgrid[pixel_data.shape[0] - 1:-1:-1,
                                pixel_data.shape[1] - 1:-1:-1]
            else:
                raise NotImplementedError("Unknown flipping operation: %s" %
                                          self.flip_choice.value)
            mask = mask[i, j]
            if pixel_data.ndim == 2:
                pixel_data = pixel_data[i, j]
            else:
                pixel_data = pixel_data[i, j, :]

        if self.rotate_choice != ROTATE_NONE:
            if self.rotate_choice == ROTATE_ANGLE:
                angle = self.angle.value
            elif self.rotate_choice == ROTATE_COORDINATES:
                xdiff = self.second_pixel.x - self.first_pixel.x
                ydiff = self.second_pixel.y - self.first_pixel.y
                if self.horiz_or_vert == C_VERTICALLY:
                    angle = -np.arctan2(ydiff, xdiff) * 180.0 / np.pi
                elif self.horiz_or_vert == C_HORIZONTALLY:
                    angle = np.arctan2(xdiff, ydiff) * 180.0 / np.pi
                else:
                    raise NotImplementedError("Unknown axis: %s" %
                                              self.horiz_or_vert.value)
            elif self.rotate_choice == ROTATE_MOUSE:
                d = self.get_dictionary()
                if (self.how_often == IO_ONCE and D_ANGLE in d
                        and d[D_ANGLE] is not None):
                    angle = d[D_ANGLE]
                else:
                    angle = workspace.interaction_request(
                        self, pixel_data,
                        workspace.measurements.image_set_number)
                if self.how_often == IO_ONCE:
                    d[D_ANGLE] = angle
            else:
                raise NotImplementedError("Unknown rotation method: %s" %
                                          self.rotate_choice.value)
            rangle = angle * np.pi / 180.0
            mask = scind.rotate(mask.astype(float), angle, reshape=True) > 0.50
            crop = (scind.rotate(
                np.ones(pixel_data.shape[:2]), angle, reshape=True) > 0.50)
            mask = mask & crop
            pixel_data = scind.rotate(pixel_data, angle, reshape=True)
            if self.wants_crop.value:
                #
                # We want to find the largest rectangle that fits inside
                # the crop. The cumulative sum in the i and j direction gives
                # the length of the rectangle in each direction and
                # multiplying them gives you the area.
                #
                # The left and right halves are symmetric, so we compute
                # on just two of the quadrants.
                #
                half = (np.array(crop.shape) / 2).astype(int)
                #
                # Operate on the lower right
                #
                quartercrop = crop[half[0]:, half[1]:]
                ci = np.cumsum(quartercrop, 0)
                cj = np.cumsum(quartercrop, 1)
                carea_d = ci * cj
                carea_d[quartercrop == 0] = 0
                #
                # Operate on the upper right by flipping I
                #
                quartercrop = crop[crop.shape[0] - half[0] - 1::-1, half[1]:]
                ci = np.cumsum(quartercrop, 0)
                cj = np.cumsum(quartercrop, 1)
                carea_u = ci * cj
                carea_u[quartercrop == 0] = 0
                carea = carea_d + carea_u
                max_carea = np.max(carea)
                max_area = np.argwhere(carea == max_carea)[0] + half
                min_i = max(crop.shape[0] - max_area[0] - 1, 0)
                max_i = max_area[0] + 1
                min_j = max(crop.shape[1] - max_area[1] - 1, 0)
                max_j = max_area[1] + 1
                ii = np.index_exp[min_i:max_i, min_j:max_j]
                crop = np.zeros(pixel_data.shape, bool)
                crop[ii] = True
                mask = mask[ii]
                pixel_data = pixel_data[ii]
            else:
                crop = None
        else:
            crop = None
            angle = 0
        output_image = cpi.Image(pixel_data, mask, crop, image)
        image_set.add(self.output_name.value, output_image)
        workspace.measurements.add_image_measurement(
            M_ROTATION_F % self.output_name.value, angle)

        vmin = min(np.min(image.pixel_data),
                   np.min(output_image.pixel_data[output_image.mask]))
        vmax = max(np.max(image.pixel_data),
                   np.max(output_image.pixel_data[output_image.mask]))
        workspace.display_data.image_pixel_data = image.pixel_data
        workspace.display_data.output_image_pixel_data = output_image.pixel_data
        workspace.display_data.vmin = vmin
        workspace.display_data.vmax = vmax
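
The rotation bookkeeping above rotates three things in lockstep: the pixels, the mask (as float, re-thresholded at 0.5), and an all-ones image whose rotated copy marks which output pixels actually originated inside the input frame. A self-contained sketch with invented data, assuming scind is the usual scipy.ndimage alias:

import numpy as np
from scipy import ndimage as scind

rng = np.random.default_rng(1)
pixel_data = rng.random((20, 30))
mask = np.ones(pixel_data.shape, bool)
angle = 15.0  # degrees

# Pixels of the rotated frame that fall outside the original image
crop = scind.rotate(np.ones(pixel_data.shape), angle, reshape=True) > 0.50
# Rotate the mask as float, re-threshold, and restrict to valid pixels
rot_mask = scind.rotate(mask.astype(float), angle, reshape=True) > 0.50
rot_mask &= crop
rotated = scind.rotate(pixel_data, angle, reshape=True)

print(pixel_data.shape, rotated.shape, rot_mask.sum())
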
Example #28
    def run(self, workspace):
        """Run the module

        workspace    - The workspace contains
            pipeline     - instance of cpp for this run
            image_set    - the images in the image set being processed
            object_set   - the objects (labeled masks) in this image set
            measurements - the measurements for this run
            frame        - the parent frame to whatever frame is created. None means don't draw.
        """
        background_image = self.get_background_image(workspace, None)

        if (self.each_or_once == EO_ONCE
                and self.get_good_gridding(workspace) is not None):
            gridding = self.get_good_gridding(workspace)
        elif self.auto_or_manual == AM_AUTOMATIC:
            gridding = self.run_automatic(workspace)
        elif self.manual_choice == MAN_COORDINATES:
            gridding = self.run_coordinates(workspace)
        elif self.manual_choice == MAN_MOUSE:
            gridding = workspace.interaction_request(
                self, background_image,
                workspace.measurements.image_set_number)
        self.set_good_gridding(workspace, gridding)
        workspace.set_grid(self.grid_image.value, gridding)
        #
        # Save measurements
        #
        self.add_measurement(
            workspace,
            F_X_LOCATION_OF_LOWEST_X_SPOT,
            gridding.x_location_of_lowest_x_spot,
        )
        self.add_measurement(
            workspace,
            F_Y_LOCATION_OF_LOWEST_Y_SPOT,
            gridding.y_location_of_lowest_y_spot,
        )
        self.add_measurement(workspace, F_ROWS, gridding.rows)
        self.add_measurement(workspace, F_COLUMNS, gridding.columns)
        self.add_measurement(workspace, F_X_SPACING, gridding.x_spacing)
        self.add_measurement(workspace, F_Y_SPACING, gridding.y_spacing)

        # update background image
        background_image = self.get_background_image(workspace, gridding)

        workspace.display_data.gridding = gridding.serialize()
        workspace.display_data.background_image = background_image
        workspace.display_data.image_set_number = (
            workspace.measurements.image_set_number)

        if self.wants_image:
            import matplotlib.transforms
            import matplotlib.figure
            import matplotlib.backends.backend_agg
            from cellprofiler.gui.tools import figure_to_image

            figure = matplotlib.figure.Figure()
            canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
            ax = figure.add_subplot(1, 1, 1)
            self.display_grid(background_image, gridding,
                              workspace.measurements.image_set_number, ax)
            #
            # This is the recipe for just showing the axis
            #
            figure.set_frameon(False)
            ax.set_axis_off()
            figure.subplots_adjust(0, 0, 1, 1, 0, 0)
            ai = ax.images[0]
            shape = ai.get_size()
            dpi = figure.dpi
            width = float(shape[1]) / dpi
            height = float(shape[0]) / dpi
            figure.set_figheight(height)
            figure.set_figwidth(width)
            bbox = matplotlib.transforms.Bbox(
                np.array([[0.0, 0.0], [width, height]]))
            transform = matplotlib.transforms.Affine2D(
                np.array([[dpi, 0, 0], [0, dpi, 0], [0, 0, 1]]))
            figure.bbox = matplotlib.transforms.TransformedBbox(
                bbox, transform)
            image_pixels = figure_to_image(figure, dpi=dpi)
            image = cpi.Image(image_pixels)

            workspace.image_set.add(self.save_image_name.value, image)
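
figure_to_image is a CellProfiler GUI helper; a rough stand-in (an assumption, not the module's actual implementation) renders the figure through matplotlib's Agg canvas and reads back the RGBA buffer:

import numpy as np
import matplotlib.figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

def figure_to_array(figure):
    """Render a matplotlib Figure to an (H, W, 4) uint8 RGBA array."""
    canvas = FigureCanvasAgg(figure)
    canvas.draw()
    return np.asarray(canvas.buffer_rgba())

figure = matplotlib.figure.Figure(figsize=(2, 2), dpi=96)
ax = figure.add_subplot(1, 1, 1)
ax.imshow(np.eye(8))
ax.set_axis_off()
figure.subplots_adjust(0, 0, 1, 1, 0, 0)

pixels = figure_to_array(figure)
print(pixels.shape)  # (192, 192, 4) at 96 dpi
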
Example #29
    def run_image(self, image, workspace):
        """Perform illumination according to the parameters of one image setting group

        """
        #
        # Get the image names from the settings
        #
        image_name = image.image_name.value
        illum_correct_name = image.illum_correct_function_image_name.value
        corrected_image_name = image.corrected_image_name.value
        #
        # Get images from the image set
        #
        orig_image = workspace.image_set.get_image(image_name)
        illum_function = workspace.image_set.get_image(illum_correct_name)
        illum_function_pixel_data = illum_function.pixel_data
        if orig_image.pixel_data.ndim == 2:
            illum_function = workspace.image_set.get_image(
                illum_correct_name, must_be_grayscale=True
            )
            illum_function_pixel_data = illum_function.pixel_data
        else:
            if illum_function_pixel_data.ndim == 2:
                illum_function_pixel_data = illum_function_pixel_data[:, :, np.newaxis]
        # Throw an error if image and illum data are incompatible
        if orig_image.pixel_data.shape != illum_function_pixel_data.shape:
            raise ValueError(
                "This module requires that the image and illumination function have equal dimensions.\n"
                "The %s image and %s illumination function do not (%s vs %s).\n"
                "If they are paired correctly you may want to use the Resize or Crop module to make them the same size."
                % (
                    image_name,
                    illum_correct_name,
                    orig_image.pixel_data.shape,
                    illum_function_pixel_data.shape,
                )
            )
        #
        # Either divide or subtract the illumination image from the original
        #
        if image.divide_or_subtract == DOS_DIVIDE:
            output_pixels = orig_image.pixel_data / illum_function_pixel_data
        elif image.divide_or_subtract == DOS_SUBTRACT:
            output_pixels = orig_image.pixel_data - illum_function_pixel_data
            output_pixels[output_pixels < 0] = 0
        else:
            raise ValueError(
                "Unhandled option for divide or subtract: %s"
                % image.divide_or_subtract.value
            )
        #
        # Save the output image in the image set and have it inherit
        # mask & cropping from the original image.
        #
        output_image = cpi.Image(output_pixels, parent_image=orig_image)
        workspace.image_set.add(corrected_image_name, output_image)
        #
        # Save images for display
        #
        if self.show_window:
            if not hasattr(workspace.display_data, "images"):
                workspace.display_data.images = {}
            workspace.display_data.images[image_name] = orig_image.pixel_data
            workspace.display_data.images[corrected_image_name] = output_pixels
            workspace.display_data.images[
                illum_correct_name
            ] = illum_function.pixel_data
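
Stripped of the workspace plumbing, the two correction modes above reduce to elementwise division or zero-clamped subtraction. A toy numpy version with invented image and illumination arrays:

import numpy as np

rng = np.random.default_rng(2)
orig = rng.random((4, 4)) * 0.8 + 0.1                      # stand-in image
illum = np.ones((4, 4)) * 0.5 + rng.random((4, 4)) * 0.1   # stand-in illumination function

# DOS_DIVIDE: remove multiplicative shading
divided = orig / illum

# DOS_SUBTRACT: remove additive background, clamped at zero
subtracted = orig - illum
subtracted[subtracted < 0] = 0

print(divided.min(), subtracted.min())
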