Example #1
    def process_image(self, image: Image):

        if image.is_color():
            image.set_color_axis_as(2)

        image.data = np.uint16(np.clip(image.data, 0, 2**16 - 1))

        return image
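The clip-then-cast step above is plain NumPy. As a minimal standalone sketch (the sample array is illustrative and the project's `Image` wrapper is left out), the same saturation to the unsigned 16-bit range looks like this:

    import numpy as np

    # illustrative float data that falls outside the 16-bit range
    data = np.array([[-120.0, 1500.5], [70000.0, 32768.0]])

    # clip into [0, 65535] before casting, so out-of-range values saturate
    # instead of wrapping around during the uint16 conversion
    clipped = np.uint16(np.clip(data, 0, 2**16 - 1))

    print(clipped)  # [[    0  1500]
                    #  [65535 32768]]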
Example #2
    def process_image(self, image: Image):

        if image.is_color():
            image.set_color_axis_as(0)

        image.data = np.float32(image.data)

        return image
Example #3
    def process_image(self, image: Image):

        for param in self._parameters:
            _LOGGER.debug(f"Autostretch param {param.name} = {param.value}")

        active = self._parameters[0]
        stretch_method = self._parameters[1]
        stretch_strength = self._parameters[2]

        if active.value:
            _LOGGER.debug("Performing Autostretch...")
            image.data = np.interp(image.data,
                                   (image.data.min(), image.data.max()),
                                   (0, _16_BITS_MAX_VALUE))

            @log
            def histo_adaptive_equalization(data):

                # special case for autostretch value == 0
                strength = stretch_strength.value if stretch_strength.value != 0 else 0.1

                return exposure.equalize_adapthist(np.uint16(data),
                                                   nbins=_16_BITS_MAX_VALUE + 1,
                                                   clip_limit=.01 * strength)

            @log
            def contrast_stretching(data):
                low, high = np.percentile(
                    data,
                    (stretch_strength.value, 100 - stretch_strength.value))
                return exposure.rescale_intensity(data, in_range=(low, high))

            available_stretches = [
                contrast_stretching, histo_adaptive_equalization
            ]

            chosen_stretch = available_stretches[stretch_method.choices.index(
                stretch_method.value)]

            if image.is_color():
                for channel in range(3):
                    image.data[channel] = chosen_stretch(image.data[channel])
            else:
                image.data = chosen_stretch(image.data)
            _LOGGER.debug("Autostretch Done")

            # autostretch output range is [0, 1]
            # so we remap values to our range [0, _16_BITS_MAX_VALUE]
            image.data *= _16_BITS_MAX_VALUE

            # final interpolation
            image.data = np.float32(
                np.interp(image.data, (image.data.min(), image.data.max()),
                          (0, _16_BITS_MAX_VALUE)))

        return image
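Both stretch flavours used above come from scikit-image's `exposure` module. A minimal standalone sketch on synthetic mono data (the array name, percentile and clip values are illustrative) could look like this:

    import numpy as np
    from skimage import exposure

    rng = np.random.default_rng(0)
    # illustrative mono frame with values in the 16-bit range
    data = rng.normal(loc=2000, scale=300, size=(64, 64)).clip(0, 2**16 - 1)

    # contrast stretching: everything below the low percentile maps to the output
    # minimum, everything above the high percentile maps to the output maximum
    low, high = np.percentile(data, (2, 98))
    stretched = exposure.rescale_intensity(data, in_range=(low, high))

    # adaptive histogram equalization: local contrast boost, output lies in [0, 1]
    equalized = exposure.equalize_adapthist(np.uint16(data), clip_limit=0.01)

    print(stretched.shape, equalized.min(), equalized.max())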
Example #4
    def _apply_transformation(self, image: Image,
                              transformation: SimilarityTransform):
        """
        Apply a transformation to an image.

        If the image is a color image, its channels are processed with multiprocessing, allowing the whole
        operation to take less time on a multi-core CPU.

        The image is modified in place by this function.

        :param image: the image to apply the transformation to
        :type image: Image

        :param transformation: the transformation to apply
        :type transformation: skimage.transform._geometric.SimilarityTransform
        """
        if image.is_color():
            _LOGGER.debug(f"Aligning color image...")

            manager = Manager()
            results_dict = manager.dict()
            channel_processors = []

            for channel in range(3):
                processor = Process(
                    target=Stacker._apply_single_channel_transformation,
                    args=[
                        image, self._last_stacking_result, transformation,
                        results_dict, channel
                    ])
                processor.start()
                channel_processors.append(processor)

            for processor in channel_processors:
                processor.join()

            _LOGGER.debug(
                "Color channel processes are done. Fetching and storing results...")

            for channel, data in results_dict.items():
                image.data[channel] = data

            _LOGGER.debug(f"Aligning color image DONE")

        else:
            _LOGGER.debug(f"Aligning b&w image...")

            result_dict = dict()

            Stacker._apply_single_channel_transformation(
                image, self._last_stacking_result, transformation, result_dict)

            image.data = result_dict[0]

            _LOGGER.debug(f"Aligning b&w image : DONE")
Example #5
    def _find_transformation(self, image: Image):
        """
        Iteratively try to find a valid transformation to align an image with the stored alignment reference.

        We perform 3 tries on a centered image subset of growing size: 10%, 33% and 100% of the image size.

        :param image: the image to be aligned
        :type image: Image

        :return: the found transformation
        :raises: StackingError when no transformation is found using the whole image
        """

        for ratio in [.1, .33, 1.]:

            top, bottom, left, right = self._get_image_subset_boundaries(ratio)

            # pick green channel if image has color
            if image.is_color():
                new_subset = image.data[1][top:bottom, left:right]
                ref_subset = self._align_reference.data[1][top:bottom,
                                                           left:right]
            else:
                new_subset = image.data[top:bottom, left:right]
                ref_subset = self._align_reference.data[top:bottom, left:right]

            try:
                _LOGGER.debug(
                    f"Searching valid transformation on subset "
                    f"with ratio:{ratio} and shape: {new_subset.shape}")

                transformation, matches = al.find_transform(
                    new_subset, ref_subset)

                _LOGGER.debug(
                    f"Found transformation with subset ratio = {ratio}")
                _LOGGER.debug(f"rotation : {transformation.rotation}")
                _LOGGER.debug(f"translation : {transformation.translation}")
                _LOGGER.debug(f"scale : {transformation.scale}")
                matches_count = len(matches[0])
                _LOGGER.debug(
                    f"image matched features count : {matches_count}")

                if matches_count < _MINIMUM_MATCHES_FOR_VALID_TRANSFORM:
                    _LOGGER.debug(
                        f"Found transformation but matches count is too low : "
                        f"{matches_count} < {_MINIMUM_MATCHES_FOR_VALID_TRANSFORM}. "
                        "Discarding transformation")
                    raise StackingError("Too few matches")

                return transformation

            # pylint: disable=W0703
            except Exception as alignment_error:
                # we have no choice but to catch Exception here; that's what AstroAlign raises in some cases.
                # This will catch MaxIterError as well...
                if ratio == 1.:
                    raise StackingError(alignment_error)

                _LOGGER.debug(
                    f"Could not find valid transformation on subset with ratio = {ratio}."
                )
                continue
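The subset boundaries come from `_get_image_subset_boundaries`, which is not part of this excerpt. A purely hypothetical version, assuming it returns a centered window covering `ratio` of the image in each dimension, could look like this:

    import numpy as np

    def get_image_subset_boundaries(shape, ratio):
        # hypothetical helper: returns top, bottom, left, right of a centered
        # window whose height and width are `ratio` times the image size
        height, width = shape[-2:]
        half_height = int(height * ratio) // 2
        half_width = int(width * ratio) // 2
        center_y, center_x = height // 2, width // 2
        return (center_y - half_height, center_y + half_height,
                center_x - half_width, center_x + half_width)

    image = np.zeros((100, 200))
    print(get_image_subset_boundaries(image.shape, .1))    # (45, 55, 90, 110)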