Example #1
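A test helper that builds a minimal CellProfiler workspace around an Align module: each input array (with an optional mask) is wrapped in an Image, added to the image set as Channel<n>, and wired to the module's first/second/additional input and output image settings (Aligned<n>).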
 def make_workspace(self, images, masks):
     pipeline = Pipeline()
     object_set = ObjectSet()
     image_set_list = ImageSetList()
     image_set = image_set_list.get_image_set(0)
     module = Align()
     workspace = Workspace(pipeline, module, image_set, object_set,
                           Measurements(), image_set_list)
     for index, (pixels, mask) in enumerate(zip(images, masks)):
         if mask is None:
             image = Image(pixels)
         else:
             image = Image(pixels, mask=mask)
         input_name = "Channel%d" % index
         output_name = "Aligned%d" % index
         image_set.add(input_name, image)
         if index == 0:
             module.first_input_image.value = input_name
             module.first_output_image.value = output_name
         elif index == 1:
             module.second_input_image.value = input_name
             module.second_output_image.value = output_name
         else:
             module.add_image()
             ai = module.additional_images[-1]
             ai.input_image_name.value = input_name
             ai.output_image_name.value = output_name
     return workspace, module
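A minimal sketch of how a test might call this helper (illustrative only; it assumes numpy is imported as in the neighboring tests, that the code runs inside the same test class, and that Align.run() writes the configured "Aligned0" image back to the image set):

    pixels = numpy.random.uniform(size=(50, 50)).astype(numpy.float32)
    workspace, module = self.make_workspace([pixels, pixels], [None, None])
    module.run(workspace)
    aligned = workspace.image_set.get_image("Aligned0")
    assert aligned.pixel_data.shape == (50, 50)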
Example #2
    def test_set_image(self):
        x = numpy.zeros((224, 224, 3), numpy.float32)

        image = Image(x)

        y = numpy.zeros((224, 224, 3), numpy.float32)

        image.set_image(y)

        numpy.testing.assert_array_equal(image.get_image(), y)
Example #3
    def test_has_parent_image(self):
        x = numpy.zeros((224, 224, 3), numpy.float32)

        parent_image = Image(x)

        assert not parent_image.has_parent_image

        image = Image(x, parent_image=parent_image)

        assert image.has_parent_image
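A minimal sketch pulling together the Image constructor keywords that recur in these examples (mask, parent_image, dimensions, convert); the values are illustrative and assume the same Image class used throughout this listing:

    pixels = numpy.random.uniform(size=(224, 224)).astype(numpy.float32)
    mask = pixels > 0.1
    original = Image(pixels)
    derived = Image(pixels * mask, mask=mask, parent_image=original,
                    dimensions=2, convert=False)
    assert derived.has_parent_image
    assert derived.pixel_data.shape == (224, 224)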
Example #4
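A module run() method that applies scipy.ndimage.median_filter with the configured window size and stores the result as a new Image, with the input as parent_image and convert=False to keep the filtered data unchanged.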
    def run(self, workspace):

        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        dimensions = x.dimensions

        x_data = x.pixel_data

        y_data = scipy.ndimage.median_filter(x_data,
                                             self.window.value,
                                             mode='constant')

        y = Image(dimensions=dimensions,
                  image=y_data,
                  parent_image=x,
                  convert=False)

        images.add(y_name, y)

        if self.show_window:
            workspace.display_data.x_data = x_data

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = dimensions
Example #5
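A run() method that denoises the input with skimage.restoration.denoise_nl_means (non-local means), using the configured cutoff distance, patch distance, and patch size, and adds the result as a child Image.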
    def run(self, workspace):
        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        dimensions = x.dimensions

        x_data = x.pixel_data

        y_data = skimage.restoration.denoise_nl_means(
            fast_mode=True,
            h=self.cutoff_distance.value,
            image=x_data,
            multichannel=x.multichannel,
            patch_distance=self.distance.value,
            patch_size=self.size.value,
        )

        y = Image(dimensions=dimensions, image=y_data, parent_image=x)

        images.add(y_name, y)

        if self.show_window:
            workspace.display_data.x_data = x_data

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = dimensions
Example #6
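A run() method that applies a Gaussian blur; sigma is divided by x.spacing so the smoothing scale accounts for voxel spacing in volumetric images.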
    def run(self, workspace):
        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        dimensions = x.dimensions

        x_data = x.pixel_data

        sigma = numpy.divide(self.sigma.value, x.spacing)

        y_data = skimage.filters.gaussian(x_data, sigma=sigma)

        y = Image(dimensions=dimensions, image=y_data, parent_image=x)

        images.add(y_name, y)

        if self.show_window:
            workspace.display_data.x_data = x_data

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = dimensions
Example #7
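A per-stain helper that unmixes a color image: pixels are offset by a small epsilon, log-transformed, scaled by the stain's inverse absorbances, summed across channels, exponentiated, clipped to [0, 1], and inverted before being stored as a child Image.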
 def run_on_output(self, workspace, input_image, output):
     """Produce one image - storing it in the image set"""
     input_pixels = input_image.pixel_data
     inverse_absorbances = self.get_inverse_absorbances(output)
     #########################################
     #
     # Renormalize to control for the other stains
     #
     # Log transform the image data
     #
     # First, rescale it a little to offset it from zero
     #
     eps = 1.0 / 256.0 / 2.0
     image = input_pixels + eps
     log_image = numpy.log(image)
     #
     # Now multiply the log-transformed image
     #
     scaled_image = log_image * inverse_absorbances[numpy.newaxis, numpy.newaxis, :]
     #
     # Exponentiate to get the image without the dye effect
     #
     image = numpy.exp(numpy.sum(scaled_image, 2))
     #
     # and subtract out the epsilon we originally introduced
     #
     image -= eps
     image[image < 0] = 0
     image[image > 1] = 1
     image = 1 - image
     image_name = output.image_name.value
     output_image = Image(image, parent_image=input_image)
     workspace.image_set.add(image_name, output_image)
     if self.show_window:
         workspace.display_data.outputs[image_name] = image
Example #8
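A run() method computing the medial axis with skimage.morphology.medial_axis; multichannel input is first converted to grayscale, and 3D input is processed plane by plane.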
    def run(self, workspace):
        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        x_data = x.pixel_data

        if x.multichannel:
            x_data = skimage.color.rgb2gray(x_data)

        if x.dimensions == 3:
            y_data = numpy.zeros_like(x_data)

            for z, image in enumerate(x_data):
                y_data[z] = skimage.morphology.medial_axis(image)
        else:
            y_data = skimage.morphology.medial_axis(x_data)

        y = Image(dimensions=x.dimensions, image=y_data, parent_image=x)

        images.add(y_name, y)

        if self.show_window:
            workspace.display_data.x_data = x_data

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = x.dimensions
Example #9
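A run() method that runs skimage.feature.match_template (with pad_input=True) against a template loaded via imageio and stores the correlation map as the output Image.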
    def run(self, workspace):
        input_image_name = self.input_image_name.value

        template_name = self.template_name.value

        output_image_name = self.output_image_name.value

        image_set = workspace.image_set

        input_image = image_set.get_image(input_image_name)

        input_pixels = input_image.pixel_data

        template = imageio.imread(template_name)

        output_pixels = skimage.feature.match_template(image=input_pixels,
                                                       template=template,
                                                       pad_input=True)

        output_image = Image(output_pixels, parent_image=input_image)

        image_set.add(output_image_name, output_image)

        if self.show_window:
            workspace.display_data.input_pixels = input_pixels

            workspace.display_data.template = template

            workspace.display_data.output_pixels = output_pixels
Example #10
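A run() method that skeletonizes the input, using skimage.morphology.skeletonize_3d for volumetric images and skeletonize otherwise.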
    def run(self, workspace):
        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        dimensions = x.dimensions

        x_data = x.pixel_data

        if x.volumetric:
            y_data = skimage.morphology.skeletonize_3d(x_data)
        else:
            y_data = skimage.morphology.skeletonize(x_data)

        y = Image(dimensions=dimensions, image=y_data, parent_image=x)

        images.add(y_name, y)

        if self.show_window:
            workspace.display_data.x_data = x_data

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = dimensions
Example #11
    def test_pixel_data(self):
        data = numpy.random.random((224, 224, 3))

        image = Image(data)

        grayscale_image = GrayscaleImage(image)

        assert grayscale_image.pixel_data.shape == (224, 224)
Example #12
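An image provider's provide_image() method returning an accumulated projection (average, variance, power, brightfield, minimum, or mask), caching the result and masking out pixels to which no image contributed.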
 def provide_image(self, image_set):
     image_count = self.__image_count
     mask_2d = image_count > 0
     if self.__how_to_accumulate == P_VARIANCE:
         ndim_image = self.__vsquared
     elif self.__how_to_accumulate == P_POWER:
         ndim_image = self.__power_image
     elif self.__how_to_accumulate == P_BRIGHTFIELD:
         ndim_image = self.__bright_max
     else:
         ndim_image = self.__image
     if ndim_image.ndim == 3:
         image_count = numpy.dstack([image_count] * ndim_image.shape[2])
     mask = image_count > 0
     if self.__cached_image is not None:
         return self.__cached_image
     if self.__how_to_accumulate == P_AVERAGE:
         cached_image = self.__image / image_count
     elif self.__how_to_accumulate == P_VARIANCE:
         cached_image = numpy.zeros(self.__vsquared.shape, numpy.float32)
         cached_image[mask] = self.__vsquared[mask] / image_count[mask]
         cached_image[mask] -= self.__vsum[mask]**2 / (image_count[mask]**2)
     elif self.__how_to_accumulate == P_POWER:
         cached_image = numpy.zeros(image_count.shape, numpy.complex128)
         cached_image[mask] = self.__power_image[mask]
         cached_image[mask] -= (self.__vsum[mask] *
                                self.__power_mask[mask] / image_count[mask])
         cached_image = (cached_image *
                         numpy.conj(cached_image)).real.astype(
                             numpy.float32)
     elif self.__how_to_accumulate == P_BRIGHTFIELD:
         cached_image = numpy.zeros(image_count.shape, numpy.float32)
         cached_image[
             mask] = self.__bright_max[mask] - self.__bright_min[mask]
     elif self.__how_to_accumulate == P_MINIMUM and numpy.any(~mask):
         cached_image = self.__image.copy()
         cached_image[~mask] = 0
     else:
         cached_image = self.__image
     cached_image[~mask] = 0
     if numpy.all(mask) or self.__how_to_accumulate == P_MASK:
         self.__cached_image = Image(cached_image)
     else:
         self.__cached_image = Image(cached_image, mask=mask_2d)
     return self.__cached_image
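The P_VARIANCE branch relies on the running-sums identity var = sum(x**2)/n - (sum(x)/n)**2; a quick plain-numpy check of that identity (illustrative, not part of the provider):

    samples = numpy.random.uniform(size=100)
    n = samples.size
    accumulator_variance = (samples ** 2).sum() / n - (samples.sum() / n) ** 2
    assert numpy.allclose(accumulator_variance, numpy.var(samples))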
Example #13
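A test verifying that Objects.dimensions follows the dimensions of the parent Image it is assigned.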
    def test_dimensions(self):
        x = numpy.zeros((100, 224, 224, 3), numpy.float32)

        parent_image = Image(x, dimensions=3)

        objects = Objects()

        objects.parent_image = parent_image

        assert objects.dimensions == 3
Example #14
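A tiling module's run() method: images are either placed adjacent within a cycle or tiled across cycles, and the tiled pixels are stored as the output Image.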
 def run(self, workspace):
     """do the image analysis"""
     if self.tile_method == T_WITHIN_CYCLES:
         output_pixels = self.place_adjacent(workspace)
     else:
         output_pixels = self.tile(workspace)
     output_image = Image(output_pixels)
     workspace.image_set.add(self.output_image.value, output_image)
     if self.show_window:
         workspace.display_data.image = output_pixels
Example #15
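run_split() splits a color image into per-channel grayscale Images, either taking the raw channels (RGB or arbitrary channels) or converting to HSV first; each channel is added with the original as its parent.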
    def run_split(self, workspace, image):
        """Split image into individual components
        """
        input_image = image.pixel_data
        disp_collection = []
        if self.rgb_or_channels in (CH_RGB, CH_CHANNELS):
            for index, name, title in self.channels_and_image_names():
                output_image = input_image[:, :, index]
                workspace.image_set.add(name, Image(output_image, parent_image=image))
                disp_collection.append([output_image, name])
        elif self.rgb_or_channels == CH_HSV:
            output_image = matplotlib.colors.rgb_to_hsv(input_image)
            for index, name, title in self.channels_and_image_names():
                workspace.image_set.add(
                    name, Image(output_image[:, :, index], parent_image=image)
                )
                disp_collection.append([output_image[:, :, index], name])

        workspace.display_data.input_image = input_image
        workspace.display_data.disp_collection = disp_collection
Example #16
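A run() method that finds intensity peaks with peak_local_max, either applying an absolute intensity threshold or first zeroing pixels outside a mask image or outside selected objects, and stores the boolean peak map as the output Image (convert=False).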
    def run(self, workspace):

        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        dimensions = x.dimensions

        x_data_orig = x.pixel_data

        x_data = x_data_orig.copy()

        th_abs = None

        if self.exclude_mode.value == MODE_THRESHOLD:
            th_abs = self.min_intensity.value
        elif self.exclude_mode.value == MODE_MASK:
            mask = images.get_image(
                self.mask_image.value).pixel_data.astype(bool)
            x_data[~mask] = 0
        elif self.exclude_mode.value == MODE_OBJECTS:
            mask_objects = workspace.object_set.get_objects(
                self.mask_objects.value)
            mask = mask_objects.segmented.astype(bool)
            x_data[~mask] = 0
        else:
            raise NotImplementedError("Invalid background method choice")

        y_data = peak_local_max(x_data,
                                min_distance=self.min_distance.value,
                                threshold_abs=th_abs,
                                indices=False)

        y = Image(dimensions=dimensions,
                  image=y_data,
                  parent_image=x,
                  convert=False)

        images.add(y_name, y)

        if self.show_window:
            workspace.display_data.x_data = x_data_orig

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = dimensions

            workspace.display_data.overlay_base = x_data
Example #17
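A smoothing module's run() method offering Gaussian, median, edge-preserving (bilateral), polynomial-fit, circular-average, and smooth-to-average filters; the filter scale comes from the object size setting or an automatic estimate.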
    def run(self, workspace):
        image = workspace.image_set.get_image(self.image_name.value,
                                              must_be_grayscale=True)
        pixel_data = image.pixel_data
        if self.wants_automatic_object_size.value:
            object_size = min(30, max(1, numpy.mean(pixel_data.shape) / 40))
        else:
            object_size = float(self.object_size.value)
        sigma = object_size / 2.35
        if self.smoothing_method.value == GAUSSIAN_FILTER:

            def fn(image):
                return scipy.ndimage.gaussian_filter(image,
                                                     sigma,
                                                     mode="constant",
                                                     cval=0)

            output_pixels = smooth_with_function_and_mask(
                pixel_data, fn, image.mask)
        elif self.smoothing_method.value == MEDIAN_FILTER:
            output_pixels = median_filter(pixel_data, image.mask,
                                          object_size / 2 + 1)
        elif self.smoothing_method.value == SMOOTH_KEEPING_EDGES:
            sigma_range = float(self.sigma_range.value)

            output_pixels = skimage.restoration.denoise_bilateral(
                image=pixel_data.astype(float),
                multichannel=image.multichannel,
                sigma_color=sigma_range,
                sigma_spatial=sigma,
            )
        elif self.smoothing_method.value == FIT_POLYNOMIAL:
            output_pixels = fit_polynomial(pixel_data, image.mask,
                                           self.clip.value)
        elif self.smoothing_method.value == CIRCULAR_AVERAGE_FILTER:
            output_pixels = circular_average_filter(pixel_data,
                                                    object_size / 2 + 1,
                                                    image.mask)
        elif self.smoothing_method.value == SM_TO_AVERAGE:
            if image.has_mask:
                mean = numpy.mean(pixel_data[image.mask])
            else:
                mean = numpy.mean(pixel_data)
            output_pixels = numpy.ones(pixel_data.shape,
                                       pixel_data.dtype) * mean
        else:
            raise ValueError("Unsupported smoothing method: %s" %
                             self.smoothing_method.value)
        output_image = Image(output_pixels, parent_image=image)
        workspace.image_set.add(self.filtered_image_name.value, output_image)
        workspace.display_data.pixel_data = pixel_data
        workspace.display_data.output_pixels = output_pixels
Example #18
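A run() method that renders either a color or a black-and-white result from a base image; unless a blank base image was requested, the output Image inherits the named input image as its parent.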
    def run(self, workspace):
        base_image, dimensions = self.base_image(workspace)

        if self.wants_color.value == WANTS_COLOR:
            pixel_data = self.run_color(workspace, base_image.copy())
        else:
            pixel_data = self.run_bw(workspace, base_image)

        output_image = Image(pixel_data, dimensions=dimensions)

        workspace.image_set.add(self.output_image_name.value, output_image)

        if not self.blank_image.value:
            image = workspace.image_set.get_image(self.image_name.value)

            output_image.parent_image = image

        if self.show_window:
            workspace.display_data.pixel_data = pixel_data

            workspace.display_data.image_pixel_data = base_image

            workspace.display_data.dimensions = dimensions
Example #19
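A test checking that Objects.masked reflects the mask carried by the parent Image.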
    def test_masked(self):
        x = numpy.zeros((224, 224, 3), numpy.float32)

        mask = numpy.ones((224, 224), bool)

        parent_image = Image(x, mask=mask)

        objects = Objects()

        objects.segmented = mask

        objects.parent_image = parent_image

        numpy.testing.assert_array_equal(objects.masked, mask)
Example #20
    def run(self, workspace):
        image = workspace.image_set.get_image(self.x_name.value,
                                              must_be_grayscale=True)

        radius = self.object_size.value / 2

        if self.method == ENHANCE:
            if self.enhance_method == E_SPECKLES:
                result = self.enhance_speckles(image, radius,
                                               self.speckle_accuracy.value)
            elif self.enhance_method == E_NEURITES:
                result = self.enhance_neurites(image, radius,
                                               self.neurite_choice.value)
                if self.wants_rescale.value:
                    result = skimage.exposure.rescale_intensity(result)
            elif self.enhance_method == E_DARK_HOLES:
                min_radius = max(1, int(self.hole_size.min / 2))

                max_radius = int((self.hole_size.max + 1) / 2)

                result = self.enhance_dark_holes(image, min_radius, max_radius)
            elif self.enhance_method == E_CIRCLES:
                result = self.enhance_circles(image, radius)
            elif self.enhance_method == E_TEXTURE:
                result = self.enhance_texture(image, self.smoothing.value)
            elif self.enhance_method == E_DIC:
                result = self.enhance_dic(image, self.angle.value,
                                          self.decay.value,
                                          self.smoothing.value)
            else:
                raise NotImplementedError("Unimplemented enhance method: %s" %
                                          self.enhance_method.value)
        elif self.method == SUPPRESS:
            result = self.suppress(image, radius)
        else:
            raise ValueError("Unknown filtering method: %s" % self.method)

        result_image = Image(result,
                             parent_image=image,
                             dimensions=image.dimensions)

        workspace.image_set.add(self.y_name.value, result_image)

        if self.show_window:
            workspace.display_data.x_data = image.pixel_data

            workspace.display_data.y_data = result

            workspace.display_data.dimensions = image.dimensions
Example #21
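A run() method that executes an ImageJ script through pyimagej worker queues: image inputs are pulled from the workspace, other settings are passed as script parameters, and script outputs are (optionally converted to float and) added back to the image set with convert=False.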
    def run(self, workspace):
        self.init_pyimagej()

        if self.show_window:
            workspace.display_data.script_input_pixels = {}
            workspace.display_data.script_input_dimensions = {}
            workspace.display_data.script_output_pixels = {}
            workspace.display_data.script_output_dimensions = {}

        script_filepath = path.join(self.script_directory.get_absolute_path(), self.script_file.value)
        # convert the CP settings to script parameters for pyimagej
        script_inputs = {}
        for name in self.script_input_settings:
            setting = self.script_input_settings[name]
            if isinstance(setting, ImageSubscriber):
                # Images need to be pulled from the workspace
                script_inputs[name] = workspace.image_set.get_image(setting.get_value())
                if self.show_window:
                    workspace.display_data.script_input_pixels[name] = script_inputs[name].pixel_data
                    workspace.display_data.script_input_dimensions[name] = script_inputs[name].dimensions
            else:
                # Other settings can be read directly
                script_inputs[name] = setting.get_value()

        # Start the script
        to_imagej.put({PYIMAGEJ_KEY_COMMAND: PYIMAGEJ_CMD_SCRIPT_RUN, PYIMAGEJ_KEY_INPUT:
            {PYIMAGEJ_SCRIPT_RUN_FILE_KEY: script_filepath,
             PYIMAGEJ_SCRIPT_RUN_INPUT_KEY: script_inputs,
             PYIMAGEJ_SCRIPT_RUN_CONVERT_IMAGES: self.convert_types.value}
                            })

        # Retrieve script output
        ij_return = from_imagej.get()
        if ij_return != PYIMAGEJ_STATUS_CMD_UNKNOWN:
            script_outputs = ij_return[PYIMAGEJ_KEY_OUTPUT]
            for name in self.script_output_settings:
                output_key = self.script_output_settings[name].get_value()
                output_value = script_outputs[name]
                # convert back to floats for CellProfiler
                if self.convert_types.value:
                    output_value = skimage.img_as_float(output_value)
                output_image = Image(image=output_value, convert=False)
                workspace.image_set.add(output_key, output_image)
                if self.show_window:
                    workspace.display_data.script_output_pixels[name] = output_image.pixel_data
                    workspace.display_data.dimensions = output_image.dimensions
Example #22
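A run() method that masks an image using either labeled objects or a binary/thresholded grayscale masking image, optionally inverting the mask, intersecting it with the image's own mask, and storing the zeroed-out pixels as a masked child Image (with masking_objects when objects were used).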
    def run(self, workspace):
        image_set = workspace.image_set
        if self.source_choice == IO_OBJECTS:
            objects = workspace.get_objects(self.object_name.value)
            labels = objects.segmented
            if self.invert_mask.value:
                mask = labels == 0
            else:
                mask = labels > 0
        else:
            objects = None
            try:
                mask = image_set.get_image(self.masking_image_name.value,
                                           must_be_binary=True).pixel_data
            except ValueError:
                mask = image_set.get_image(self.masking_image_name.value,
                                           must_be_grayscale=True).pixel_data
                mask = mask > 0.5
            if self.invert_mask.value:
                mask = mask == 0
        orig_image = image_set.get_image(self.image_name.value)
        if (orig_image.multichannel
                and mask.shape != orig_image.pixel_data.shape[:-1]
            ) or mask.shape != orig_image.pixel_data.shape:
            tmp = numpy.zeros(orig_image.pixel_data.shape[:2], mask.dtype)
            tmp[mask] = True
            mask = tmp
        if orig_image.has_mask:
            mask = numpy.logical_and(mask, orig_image.mask)
        masked_pixels = orig_image.pixel_data.copy()
        masked_pixels[numpy.logical_not(mask)] = 0
        masked_image = Image(
            masked_pixels,
            mask=mask,
            parent_image=orig_image,
            masking_objects=objects,
            dimensions=orig_image.dimensions,
        )

        image_set.add(self.masked_image_name.value, masked_image)

        if self.show_window:
            workspace.display_data.dimensions = orig_image.dimensions
            workspace.display_data.orig_image_pixel_data = orig_image.pixel_data
            workspace.display_data.masked_pixels = masked_pixels
            workspace.display_data.multichannel = orig_image.multichannel
Example #23
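run_combine() converts a color image to grayscale as a weighted sum of the selected channels, normalized by the total of the contributions.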
    def run_combine(self, workspace, image):
        """Combine images to make a grayscale one
        """
        input_image = image.pixel_data
        channels, contributions = list(zip(*self.channels_and_contributions()))
        denominator = sum(contributions)
        channels = numpy.array(channels, int)
        contributions = numpy.array(contributions) / denominator

        output_image = numpy.sum(
            input_image[:, :, channels]
            * contributions[numpy.newaxis, numpy.newaxis, :],
            2,
        )
        image = Image(output_image, parent_image=image)
        workspace.image_set.add(self.grayscale_name.value, image)

        workspace.display_data.input_image = input_image
        workspace.display_data.output_image = output_image
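A small plain-numpy sketch of the weighted combination above (the channel indices and contributions are illustrative):

    rgb = numpy.random.uniform(size=(32, 32, 3))
    channels = numpy.array([0, 1, 2], int)
    contributions = numpy.array([1.0, 2.0, 1.0])
    contributions = contributions / contributions.sum()
    gray = numpy.sum(rgb[:, :, channels] * contributions[numpy.newaxis, numpy.newaxis, :], 2)
    assert gray.shape == (32, 32)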
Example #24
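A run() method that averages color input down to grayscale (with a warning), applies the configured sequence of functions to the pixel data, passing along the image mask if any, and stores the result as a child Image.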
 def run(self, workspace):
     image = workspace.image_set.get_image(self.image_name.value)
     if image.has_mask:
         mask = image.mask
     else:
         mask = None
     pixel_data = image.pixel_data
     if pixel_data.ndim == 3:
         if any([
                 numpy.any(pixel_data[:, :, 0] != pixel_data[:, :, plane])
                 for plane in range(1, pixel_data.shape[2])
         ]):
             logging.warning("Image is color, converting to grayscale")
         pixel_data = numpy.sum(pixel_data, 2) / pixel_data.shape[2]
     for function in self.functions:
         pixel_data = self.run_function(function, pixel_data, mask)
     new_image = Image(pixel_data, parent_image=image)
     workspace.image_set.add(self.output_image_name.value, new_image)
     if self.show_window:
         workspace.display_data.image = image.pixel_data
         workspace.display_data.pixel_data = pixel_data
Example #25
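A run() method that delegates to a medialaxis() helper handling multichannel and volumetric input, then wraps the result as a child Image.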
    def run(self, workspace):
        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        x_data = x.pixel_data

        y_data = medialaxis(x_data, x.multichannel, x.volumetric)

        y = Image(dimensions=x.dimensions, image=y_data, parent_image=x)

        images.add(y_name, y)

        if self.show_window:
            workspace.display_data.x_data = x_data

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = x.dimensions
Example #26
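A gray-to-color run() method: grayscale channels are combined into RGB either via per-channel color intensities and adjustment factors, by stacking, or as a weighted color composite, with size checks and optional rescaling; the result is stored with channel_names attached.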
    def run(self, workspace):
        parent_image = None
        parent_image_name = None
        imgset = workspace.image_set
        rgb_pixel_data = None
        input_image_names = []
        channel_names = []
        if self.scheme_choice not in (SCHEME_STACK, SCHEME_COMPOSITE):
            for color_scheme_setting in self.color_scheme_settings:
                if color_scheme_setting.image_name.is_blank:
                    channel_names.append("Blank")
                    continue
                image_name = color_scheme_setting.image_name.value
                input_image_names.append(image_name)
                channel_names.append(image_name)
                image = imgset.get_image(image_name, must_be_grayscale=True)
                multiplier = (
                    color_scheme_setting.intensities
                    * color_scheme_setting.adjustment_factor.value
                )
                pixel_data = image.pixel_data
                if self.wants_rescale.value:
                    pixel_data = pixel_data / numpy.max(pixel_data)
                if parent_image is not None:
                    if parent_image.pixel_data.shape != pixel_data.shape:
                        raise ValueError(
                            "The %s image and %s image have different sizes (%s vs %s)"
                            % (
                                parent_image_name,
                                color_scheme_setting.image_name.value,
                                parent_image.pixel_data.shape,
                                image.pixel_data.shape,
                            )
                        )
                    rgb_pixel_data += numpy.dstack([pixel_data] * 3) * multiplier
                else:
                    parent_image = image
                    parent_image_name = color_scheme_setting.image_name.value
                    rgb_pixel_data = numpy.dstack([pixel_data] * 3) * multiplier
        else:
            input_image_names = [sc.image_name.value for sc in self.stack_channels]
            channel_names = input_image_names
            source_channels = [
                imgset.get_image(name, must_be_grayscale=True).pixel_data
                for name in input_image_names
            ]
            parent_image = imgset.get_image(input_image_names[0])
            for idx, pd in enumerate(source_channels):
                if pd.shape != source_channels[0].shape:
                    raise ValueError(
                        "The %s image and %s image have different sizes (%s vs %s)"
                        % (
                            self.stack_channels[0].image_name.value,
                            self.stack_channels[idx].image_name.value,
                            source_channels[0].shape,
                            pd.shape,
                        )
                    )
            if self.scheme_choice == SCHEME_STACK:
                rgb_pixel_data = numpy.dstack(source_channels)
            else:
                colors = []
                pixel_data = parent_image.pixel_data
                if self.wants_rescale.value:
                    pixel_data = pixel_data / numpy.max(pixel_data)
                for sc in self.stack_channels:
                    color_tuple = sc.color.to_rgb()
                    color = (
                        sc.weight.value
                        * numpy.array(color_tuple).astype(pixel_data.dtype)
                        / 255
                    )
                    colors.append(color[numpy.newaxis, numpy.newaxis, :])
                rgb_pixel_data = (
                    pixel_data[:, :, numpy.newaxis] * colors[0]
                )
                for image, color in zip(source_channels[1:], colors[1:]):
                    if self.wants_rescale.value:
                        image = image / numpy.max(image)
                    rgb_pixel_data = rgb_pixel_data + image[:, :, numpy.newaxis] * color

        ##############
        # Save image #
        ##############
        rgb_image = Image(rgb_pixel_data, parent_image=parent_image)
        rgb_image.channel_names = channel_names
        imgset.add(self.rgb_image_name.value, rgb_image)

        ##################
        # Display images #
        ##################
        if self.show_window:
            workspace.display_data.input_image_names = input_image_names
            workspace.display_data.rgb_pixel_data = rgb_pixel_data
            workspace.display_data.images = [
                imgset.get_image(name, must_be_grayscale=True).pixel_data
                for name in input_image_names
            ]
Example #27
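An image-math run() method: operand images (and image measurements) are cropped to the smallest image, scaled by per-image factors, combined with the selected arithmetic or logical operation, post-processed (exponent, factor, addend, truncation, NaN replacement) for operations that do not produce binary output, and stored along with the combined mask, crop mask, and masking objects.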
    def run(self, workspace):
        image_names = [
            image.image_name.value for image in self.images
            if image.image_or_measurement == IM_IMAGE
        ]
        image_factors = [image.factor.value for image in self.images]
        wants_image = [
            image.image_or_measurement == IM_IMAGE for image in self.images
        ]

        if self.operation.value in [
                O_INVERT,
                O_LOG_TRANSFORM,
                O_LOG_TRANSFORM_LEGACY,
                O_NOT,
                O_NONE,
        ]:
            # these only operate on the first image
            image_names = image_names[:1]
            image_factors = image_factors[:1]

        images = [workspace.image_set.get_image(x) for x in image_names]
        pixel_data = [image.pixel_data for image in images]
        masks = [image.mask if image.has_mask else None for image in images]

        # Crop all of the images similarly
        smallest = numpy.argmin([numpy.product(pd.shape) for pd in pixel_data])
        smallest_image = images[smallest]
        for i in [x for x in range(len(images)) if x != smallest]:
            pixel_data[i] = smallest_image.crop_image_similarly(pixel_data[i])
            if masks[i] is not None:
                masks[i] = smallest_image.crop_image_similarly(masks[i])

        # weave in the measurements
        idx = 0
        measurements = workspace.measurements
        for i in range(self.operand_count):
            if not wants_image[i]:
                value = measurements.get_current_image_measurement(
                    self.images[i].measurement.value)
                value = numpy.NaN if value is None else float(value)
                pixel_data.insert(i, value)
                masks.insert(i, True)

        # Multiply images by their factors
        for i, image_factor in enumerate(image_factors):
            if image_factor != 1 and self.operation not in BINARY_OUTPUT_OPS:
                pixel_data[i] = pixel_data[i] * image_factors[i]

        output_pixel_data = pixel_data[0]
        output_mask = masks[0]

        opval = self.operation.value
        if opval in [
                O_ADD,
                O_SUBTRACT,
                O_DIFFERENCE,
                O_MULTIPLY,
                O_DIVIDE,
                O_AVERAGE,
                O_MAXIMUM,
                O_MINIMUM,
                O_AND,
                O_OR,
                O_EQUALS,
        ]:
            # Binary operations
            if opval in (O_ADD, O_AVERAGE):
                op = numpy.add
            elif opval == O_SUBTRACT:
                if self.use_logical_operation(pixel_data):
                    output_pixel_data = pixel_data[0].copy()
                else:
                    op = numpy.subtract
            elif opval == O_DIFFERENCE:
                if self.use_logical_operation(pixel_data):
                    op = numpy.logical_xor
                else:

                    def op(x, y):
                        return numpy.abs(numpy.subtract(x, y))

            elif opval == O_MULTIPLY:
                if self.use_logical_operation(pixel_data):
                    op = numpy.logical_and
                else:
                    op = numpy.multiply
            elif opval == O_MINIMUM:
                op = numpy.minimum
            elif opval == O_MAXIMUM:
                op = numpy.maximum
            elif opval == O_AND:
                op = numpy.logical_and
            elif opval == O_OR:
                op = numpy.logical_or
            elif opval == O_EQUALS:
                output_pixel_data = numpy.ones(pixel_data[0].shape, bool)
                comparitor = pixel_data[0]
            else:
                op = numpy.divide
            for pd, mask in zip(pixel_data[1:], masks[1:]):
                if not numpy.isscalar(
                        pd) and output_pixel_data.ndim != pd.ndim:
                    if output_pixel_data.ndim == 2:
                        output_pixel_data = output_pixel_data[:, :,
                                                              numpy.newaxis]
                        if opval == O_EQUALS and not numpy.isscalar(
                                comparitor):
                            comparitor = comparitor[:, :, numpy.newaxis]
                    if pd.ndim == 2:
                        pd = pd[:, :, numpy.newaxis]
                if opval == O_EQUALS:
                    output_pixel_data = output_pixel_data & (comparitor == pd)
                elif opval == O_SUBTRACT and self.use_logical_operation(
                        pixel_data):
                    output_pixel_data[pd] = False
                else:
                    output_pixel_data = op(output_pixel_data, pd)
                if self.ignore_mask:
                    continue
                else:
                    if output_mask is None:
                        output_mask = mask
                    elif mask is not None:
                        output_mask = output_mask & mask
            if opval == O_AVERAGE:
                if not self.use_logical_operation(pixel_data):
                    output_pixel_data /= sum(image_factors)
        elif opval == O_INVERT:
            output_pixel_data = skimage.util.invert(output_pixel_data)
        elif opval == O_NOT:
            output_pixel_data = numpy.logical_not(output_pixel_data)
        elif opval == O_LOG_TRANSFORM:
            output_pixel_data = numpy.log2(output_pixel_data + 1)
        elif opval == O_LOG_TRANSFORM_LEGACY:
            output_pixel_data = numpy.log2(output_pixel_data)
        elif opval == O_NONE:
            output_pixel_data = output_pixel_data.copy()
        else:
            raise NotImplementedError(
                "The operation %s has not been implemented" % opval)

        # Check to see if there was a measurement & image w/o mask. If so
        # set mask to none
        if numpy.isscalar(output_mask):
            output_mask = None
        if opval not in BINARY_OUTPUT_OPS:
            #
            # Post-processing: exponent, multiply, add
            #
            if self.exponent.value != 1:
                output_pixel_data **= self.exponent.value
            if self.after_factor.value != 1:
                output_pixel_data *= self.after_factor.value
            if self.addend.value != 0:
                output_pixel_data += self.addend.value

            #
            # truncate values
            #
            if self.truncate_low.value:
                output_pixel_data[output_pixel_data < 0] = 0
            if self.truncate_high.value:
                output_pixel_data[output_pixel_data > 1] = 1
            if self.replace_nan.value:
                output_pixel_data[numpy.isnan(output_pixel_data)] = 0

        #
        # add the output image to the workspace
        #
        crop_mask = smallest_image.crop_mask if smallest_image.has_crop_mask else None
        masking_objects = (smallest_image.masking_objects
                           if smallest_image.has_masking_objects else None)
        output_image = Image(
            output_pixel_data,
            mask=output_mask,
            crop_mask=crop_mask,
            parent_image=images[0],
            masking_objects=masking_objects,
            convert=False,
            dimensions=images[0].dimensions,
        )
        workspace.image_set.add(self.output_image_name.value, output_image)

        #
        # Display results
        #
        if self.show_window:
            workspace.display_data.pixel_data = [
                image.pixel_data for image in images
            ] + [output_pixel_data]

            workspace.display_data.display_names = image_names + [
                self.output_image_name.value
            ]

            workspace.display_data.dimensions = output_image.dimensions
Example #28
    def run_image(self, image, workspace):
        """Perform illumination according to the parameters of one image setting group

        """
        #
        # Get the image names from the settings
        #
        image_name = image.image_name.value
        illum_correct_name = image.illum_correct_function_image_name.value
        corrected_image_name = image.corrected_image_name.value
        #
        # Get images from the image set
        #
        orig_image = workspace.image_set.get_image(image_name)
        illum_function = workspace.image_set.get_image(illum_correct_name)
        illum_function_pixel_data = illum_function.pixel_data
        if self.clip.value:
            illum_function_pixel_data = numpy.clip(illum_function_pixel_data, 0, 1)
        if orig_image.pixel_data.ndim == 2:
            illum_function = workspace.image_set.get_image(
                illum_correct_name, must_be_grayscale=True
            )
        else:
            if illum_function_pixel_data.ndim == 2:
                illum_function_pixel_data = illum_function_pixel_data[
                    :, :, numpy.newaxis
                ]
        # Throw an error if image and illum data are incompatible
        if orig_image.pixel_data.shape[:2] != illum_function_pixel_data.shape[:2]:
            raise ValueError(
                "This module requires that the image and illumination function have equal dimensions.\n"
                "The %s image and %s illumination function do not (%s vs %s).\n"
                "If they are paired correctly you may want to use the Resize or Crop module to make them the same size."
                % (
                    image_name,
                    illum_correct_name,
                    orig_image.pixel_data.shape,
                    illum_function_pixel_data.shape,
                )
            )
        #
        # Either divide or subtract the illumination image from the original
        #
        if image.divide_or_subtract == DOS_DIVIDE:
            output_pixels = orig_image.pixel_data / illum_function_pixel_data
        elif image.divide_or_subtract == DOS_SUBTRACT:
            output_pixels = orig_image.pixel_data - illum_function_pixel_data
            output_pixels[output_pixels < 0] = 0
        else:
            raise ValueError(
                "Unhandled option for divide or subtract: %s"
                % image.divide_or_subtract.value
            )
        #
        # Save the output image in the image set and have it inherit
        # mask & cropping from the original image.
        #
        output_image = Image(output_pixels, parent_image=orig_image)
        workspace.image_set.add(corrected_image_name, output_image)
        #
        # Save images for display
        #
        if self.show_window:
            if not hasattr(workspace.display_data, "images"):
                workspace.display_data.images = {}
            workspace.display_data.images[image_name] = orig_image.pixel_data
            workspace.display_data.images[corrected_image_name] = output_pixels
            workspace.display_data.images[
                illum_correct_name
            ] = illum_function.pixel_data
Example #29
    def run(self, workspace):
        images = workspace.image_set
        x = images.get_image(self.x_name.value)
        dimensions = x.dimensions
        x_data = x.pixel_data

        # Validate some settings
        if self.model.value in (GREY_1, GREY_2) and x.multichannel:
            raise ValueError(
                "Color images are not supported by this model. Please provide greyscale images."
            )
        elif self.model.value == COLOR_1 and not x.multichannel:
            raise ValueError(
                "Greyscale images are not supported by this model. Please provide a color overlay."
            )

        if self.model.value != MODEL_CUSTOM:
            if x.volumetric:
                raise NotImplementedError(
                    "StarDist's inbuilt models do not currently support 3D images"
                )
            model = StarDist2D.from_pretrained(self.model.value)
        else:
            model_directory, model_name = os.path.split(
                self.model_directory.get_absolute_path())
            if x.volumetric:
                from stardist.models import StarDist3D
                model = StarDist3D(config=None,
                                   basedir=model_directory,
                                   name=model_name)
            else:
                model = StarDist2D(config=None,
                                   basedir=model_directory,
                                   name=model_name)

        tiles = None
        if self.tile_image.value:
            tiles = []
            if x.volumetric:
                tiles += [1]
            tiles += [self.n_tiles_x.value, self.n_tiles_y.value]
            # Handle colour channels
            tiles += [1] * max(0, x.pixel_data.ndim - len(tiles))
            print(x.pixel_data.shape, x.pixel_data.ndim, tiles)

        if not self.save_probabilities.value:
            # Probabilities aren't wanted, things are simple
            data = model.predict_instances(normalize(x.pixel_data),
                                           return_predict=False,
                                           n_tiles=tiles)
            y_data = data[0]
        else:
            data, probs = model.predict_instances(normalize(x.pixel_data),
                                                  return_predict=True,
                                                  sparse=False,
                                                  n_tiles=tiles)
            y_data = data[0]

            # Scores aren't at the same resolution as the input image.
            # We need to slightly resize to match the original image.
            size_corrected = resize(probs[0], y_data.shape)
            prob_image = Image(
                size_corrected,
                parent_image=x.parent_image,
                convert=False,
                dimensions=len(size_corrected.shape),
            )

            workspace.image_set.add(self.probabilities_name.value, prob_image)

            if self.show_window:
                workspace.display_data.probabilities = size_corrected

        y = Objects()
        y.segmented = y_data
        y.parent_image = x.parent_image
        objects = workspace.object_set
        objects.add_objects(y, self.y_name.value)

        self.add_measurements(workspace)

        if self.show_window:
            workspace.display_data.x_data = x_data
            workspace.display_data.y_data = y_data
            workspace.display_data.dimensions = dimensions
Example #30
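A run() method that renders labeled objects as an image in binary, grayscale, uint16, or color mode (labels mapped through a matplotlib colormap), normalizing overlapping labels by an alpha count before building the output Image.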
    def run(self, workspace):
        objects = workspace.object_set.get_objects(self.object_name.value)

        alpha = numpy.zeros(objects.shape)

        convert = True

        if self.image_mode == "Binary (black & white)":
            pixel_data = numpy.zeros(objects.shape, bool)
        elif self.image_mode == "Grayscale":
            pixel_data = numpy.zeros(objects.shape)
        elif self.image_mode == "uint16":
            pixel_data = numpy.zeros(objects.shape, numpy.int32)
            convert = False
        else:
            pixel_data = numpy.zeros(objects.shape + (3, ))

        for labels, _ in objects.get_labels():
            mask = labels != 0

            if numpy.all(~mask):
                continue

            if self.image_mode == "Binary (black & white)":
                pixel_data[mask] = True

                alpha[mask] = 1
            elif self.image_mode == "Grayscale":
                pixel_data[mask] = labels[mask].astype(float) / numpy.max(
                    labels)

                alpha[mask] = 1
            elif self.image_mode == "Color":
                if self.colormap.value == DEFAULT_COLORMAP:
                    cm_name = get_default_colormap()
                elif self.colormap.value == "colorcube":
                    # Colorcube missing from matplotlib
                    cm_name = "gist_rainbow"
                elif self.colormap.value == "lines":
                    # Lines missing from matplotlib and not much like it,
                    # Pretty boring palette anyway, hence
                    cm_name = "Pastel1"
                elif self.colormap.value == "white":
                    # White missing from matplotlib, it's just a colormap
                    # of all completely white... not even different kinds of
                    # white. And, isn't white just a uniform sampling of
                    # frequencies from the spectrum?
                    cm_name = "Spectral"
                else:
                    cm_name = self.colormap.value

                cm = matplotlib.cm.get_cmap(cm_name)

                mapper = matplotlib.cm.ScalarMappable(cmap=cm)

                if labels.ndim == 3:
                    for index, plane in enumerate(mask):
                        pixel_data[index, plane, :] = mapper.to_rgba(
                            centrosome.cpmorphology.distance_color_labels(
                                labels[index]))[plane, :3]
                else:
                    pixel_data[mask, :] += mapper.to_rgba(
                        centrosome.cpmorphology.distance_color_labels(labels))[
                            mask, :3]

                alpha[mask] += 1
            elif self.image_mode == "uint16":
                pixel_data[mask] = labels[mask]

                alpha[mask] = 1

        mask = alpha > 0

        if self.image_mode == "Color":
            pixel_data[
                mask, :] = pixel_data[mask, :] / alpha[mask][:, numpy.newaxis]
        elif self.image_mode != "Binary (black & white)":
            pixel_data[mask] = pixel_data[mask] / alpha[mask]

        image = Image(
            pixel_data,
            parent_image=objects.parent_image,
            convert=convert,
            dimensions=len(objects.shape),
        )

        workspace.image_set.add(self.image_name.value, image)

        if self.show_window:
            if image.dimensions == 2:
                workspace.display_data.ijv = objects.ijv
            else:
                workspace.display_data.segmented = objects.segmented

            workspace.display_data.pixel_data = pixel_data

            workspace.display_data.dimensions = image.dimensions