Example #1
    def process_image(self, image: Image):

        for param in self._parameters:
            _LOGGER.debug(f"Autostretch param {param.name} = {param.value}")

        active = self._parameters[0]
        stretch_method = self._parameters[1]
        stretch_strength = self._parameters[2]

        if active.value:
            _LOGGER.debug("Performing Autostretch...")
            image.data = np.interp(image.data,
                                   (image.data.min(), image.data.max()),
                                   (0, _16_BITS_MAX_VALUE))

            @log
            def histo_adpative_equalization(data):

                # special case for autostretch value == 0
                strength = stretch_strength.value if stretch_strength.value != 0 else 0.1

                return exposure.equalize_adapthist(
                    np.uint16(data),
                    nbins=_16_BITS_MAX_VALUE + 1,
                    clip_limit=.01 * strength)

            @log
            def contrast_stretching(data):
                low, high = np.percentile(
                    data,
                    (stretch_strength.value, 100 - stretch_strength.value))
                return exposure.rescale_intensity(data, in_range=(low, high))

            available_stretches = [
                contrast_stretching, histo_adpative_equalization
            ]

            chosen_stretch = available_stretches[stretch_method.choices.index(
                stretch_method.value)]

            if image.is_color():
                for channel in range(3):
                    image.data[channel] = chosen_stretch(image.data[channel])
            else:
                image.data = chosen_stretch(image.data)
            _LOGGER.debug("Autostretch Done")

            # autostretch output range is [0, 1]
            # so we remap values to our 16-bit range [0, _16_BITS_MAX_VALUE]
            image.data *= _16_BITS_MAX_VALUE

            # final interpolation
            image.data = np.float32(
                np.interp(image.data, (image.data.min(), image.data.max()),
                          (0, _16_BITS_MAX_VALUE)))

        return image
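
As a quick illustration of the contrast-stretching branch above, here is a minimal standalone sketch outside the processor's parameter objects (the function name and default strength are assumptions):

    import numpy as np
    from skimage import exposure

    def contrast_stretch(data: np.ndarray, strength: float = 2.0) -> np.ndarray:
        # clip the darkest and brightest `strength` percentiles,
        # then rescale the remaining values over the full range
        low, high = np.percentile(data, (strength, 100 - strength))
        return exposure.rescale_intensity(data, in_range=(low, high))
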
Example #2
    def process_image(self, image: Image):

        if image.needs_debayering():

            bayer_pattern = image.bayer_pattern

            cv2_debayer_dict = {
                "BG": cv2.COLOR_BAYER_BG2RGB,
                "GB": cv2.COLOR_BAYER_GB2RGB,
                "RG": cv2.COLOR_BAYER_RG2RGB,
                "GR": cv2.COLOR_BAYER_GR2RGB
            }

            cv_debay = bayer_pattern[3] + bayer_pattern[2]

            # ugly temporary fix for GBRG CFA patterns poorly handled by OpenCV
            if cv_debay == "GR":
                cv_debay = "BG"

            try:
                debayered_data = cv2.cvtColor(image.data,
                                              cv2_debayer_dict[cv_debay])
            except KeyError:
                raise ProcessingError(
                    f"unsupported bayer pattern : {bayer_pattern}")

            image.data = debayered_data

        return image
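
To see how the pattern-string reordering above selects an OpenCV conversion code, here is a minimal sketch (the frame is synthetic and the "RGGB" pattern is an assumption):

    import cv2
    import numpy as np

    # synthetic 16-bit raw frame with an assumed "RGGB" CFA pattern
    raw = np.random.randint(0, 2**16, size=(480, 640), dtype=np.uint16)

    # "RGGB"[3] + "RGGB"[2] -> "BG", i.e. cv2.COLOR_BAYER_BG2RGB
    rgb = cv2.cvtColor(raw, cv2.COLOR_BAYER_BG2RGB)
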
Example #3
    def _save_image_as_jpg(image: Image, target_path: str):
        """
        Saves image as jpg.

        :param image: the image to save
        :type image: Image

        :param target_path: the absolute path of the image file to save to
        :type target_path: str

        :return: a tuple with 2 values:

          - True if the save succeeded, False otherwise
          - details on the cause of the save failure, if one occurred

        As we are using cv2.imwrite, we won't get any details on failures, so the failure details will
        always be the empty string.
        """
        # here we are sure that the image data type is unsigned 16 bits. We need to downscale to 8 bits
        image.data = (image.data / (((2**16) - 1) /
                                    ((2**8) - 1))).astype('uint8')

        return cv2.imwrite(target_path,
                           cv2.cvtColor(image.data, cv2.COLOR_RGB2BGR),
                           [int(cv2.IMWRITE_JPEG_QUALITY), 90]), ''
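
A quick sanity check on the 16-bit to 8-bit downscale used above: the divisor (2**16 - 1) / (2**8 - 1) equals 257, so the top of the 16-bit range maps exactly onto the top of the 8-bit range:

    # 65535 / 255 == 257, and 65535 / 257 == 255
    assert ((2**16) - 1) / ((2**8) - 1) == 257.0
    assert ((2**16) - 1) / 257 == 255.0
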
Example #4
    def process_image(self, image: Image):

        if image.is_color():
            image.set_color_axis_as(2)

        image.data = np.uint16(np.clip(image.data, 0, 2**16 - 1))

        return image
Example #5
    def process_image(self, image: Image):

        if image.is_color():
            image.set_color_axis_as(0)

        image.data = np.float32(image.data)

        return image
Example #6
    def _apply_transformation(self, image: Image,
                              transformation: SimilarityTransform):
        """
        Apply a transformation to an image.

        If the image is color, channels are processed using multiprocessing, allowing the global operation
        to take less time on a multi-core CPU.

        The image is modified in place by this function.

        :param image: the image to apply transformation to
        :type image: Image

        :param transformation: the transformation to apply
        :type transformation: skimage.transform._geometric.SimilarityTransform
        """
        if image.is_color():
            _LOGGER.debug("Aligning color image...")

            manager = Manager()
            results_dict = manager.dict()
            channel_processors = []

            for channel in range(3):
                processor = Process(
                    target=Stacker._apply_single_channel_transformation,
                    args=[
                        image, self._last_stacking_result, transformation,
                        results_dict, channel
                    ])
                processor.start()
                channel_processors.append(processor)

            for processor in channel_processors:
                processor.join()

            _LOGGER.debug(
                "Color channel processes are done. Fetching and storing results...")

            for channel, data in results_dict.items():
                image.data[channel] = data

            _LOGGER.debug("Aligning color image DONE")

        else:
            _LOGGER.debug("Aligning b&w image...")

            result_dict = dict()

            Stacker._apply_single_channel_transformation(
                image, self._last_stacking_result, transformation, result_dict)

            image.data = result_dict[0]

            _LOGGER.debug("Aligning b&w image : DONE")
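
The per-channel multiprocessing pattern used above can be reduced to the following minimal sketch; the worker function and array shape are assumptions, not the Stacker._apply_single_channel_transformation referenced in the code:

    from multiprocessing import Manager, Process

    import numpy as np

    def _process_channel(data, results, channel):
        # hypothetical worker: transform one channel and store it under its index
        results[channel] = data[channel] * 2.0

    if __name__ == "__main__":
        image_data = np.random.rand(3, 8, 8).astype(np.float32)
        results = Manager().dict()

        workers = [
            Process(target=_process_channel, args=(image_data, results, channel))
            for channel in range(3)
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        # fetch each channel's result from the shared dict, as in _apply_transformation
        for channel, data in results.items():
            image_data[channel] = data
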
Example #7
    def process_image(self, image: Image):
        # pylint: disable=R0914

        active = self._parameters[0]
        black = self._parameters[1]
        midtones = self._parameters[2]
        white = self._parameters[3]

        for param in self._parameters:
            _LOGGER.debug(f"Levels param {param.name} = {param.value}")

        if active.value:
            # midtones correction
            do_midtones = not midtones.is_default()
            _LOGGER.debug(f"Levels : do midtones adjustments : {do_midtones}")

            if do_midtones:
                _LOGGER.debug("Performing midtones adjustments...")
                midtones_value = midtones.value if midtones.value > 0 else 0.1
                image.data = (_16_BITS_MAX_VALUE * image.data**(1 / midtones_value)
                              / _16_BITS_MAX_VALUE**(1 / midtones_value))
                _LOGGER.debug("Midtones level adjustments Done")

            # black / white levels
            do_black_white_levels = not black.is_default() or not white.is_default()
            _LOGGER.debug(
                f"Levels : do black and white adjustments : {do_black_white_levels}"
            )

            if do_black_white_levels:
                _LOGGER.debug("Performing black / white level adjustments...")
                image.data = np.clip(image.data, black.value, white.value)
                _LOGGER.debug("Black / white level adjustments Done")

            # final interpolation
            image.data = np.float32(
                np.interp(image.data, (image.data.min(), image.data.max()),
                          (0, _16_BITS_MAX_VALUE)))

        return image
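
The midtones expression above is a gamma-style power curve; it is algebraically the same as _16_BITS_MAX_VALUE * (data / _16_BITS_MAX_VALUE) ** (1 / midtones_value), which can be easier to read. A quick check with assumed values:

    import numpy as np

    _16_BITS_MAX_VALUE = 2**16 - 1
    midtones_value = 0.8                     # assumed midtones setting
    data = np.float32([1000.0, 30000.0, 60000.0])

    original = _16_BITS_MAX_VALUE * data**(1 / midtones_value) / _16_BITS_MAX_VALUE**(1 / midtones_value)
    rewritten = _16_BITS_MAX_VALUE * (data / _16_BITS_MAX_VALUE)**(1 / midtones_value)

    assert np.allclose(original, rewritten)
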
Example #8
    def _stack_image(self, image: Image):
        """
        Compute stacking according to the user-defined stacking mode.

        The image data is modified in place by this function.

        :param image: the image to be stacked
        :type image: Image
        """

        _LOGGER.debug(f"Stacking in {self._stacking_mode} mode...")
        if self._stacking_mode == STACKING_MODE_SUM:
            image.data = image.data + self._last_stacking_result.data
        elif self._stacking_mode == STACKING_MODE_MEAN:
            image.data = (self.size * self._last_stacking_result.data +
                          image.data) / (self.size + 1)
        else:
            raise StackingError(
                f"Unsupported stacking mode : {self._stacking_mode}")
        _LOGGER.debug(f"Stacking in {self._stacking_mode} done.")
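
The STACKING_MODE_MEAN branch is a running mean, assuming self.size counts the frames already stacked. A quick numeric check with assumed values:

    # after 3 frames averaging to 10.0, adding a frame of value 18.0
    # gives (3 * 10.0 + 18.0) / (3 + 1) == 12.0, the mean of all 4 frames
    size = 3
    last_mean = 10.0
    new_frame = 18.0
    assert (size * last_mean + new_frame) / (size + 1) == 12.0
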
Example #9
    def process_image(self, image: Image):
        """
        Performs RGB balance

        :param image: the image to process
        :type image: Image
        """

        for param in self._parameters:
            _LOGGER.debug(f"Color balance param {param.name} = {param.value}")

        active = self._parameters[0]
        red = self._parameters[1]
        green = self._parameters[2]
        blue = self._parameters[3]

        if active.value:
            red_value = red.value if red.value > 0 else 0.1
            green_value = green.value if green.value > 0 else 0.1
            blue_value = blue.value if blue.value > 0 else 0.1

            processed = False

            if not red.is_default():
                image.data[0] = image.data[0] * red_value
                processed = True

            if not green.is_default():
                image.data[1] = image.data[1] * green_value
                processed = True

            if not blue.is_default():
                image.data[2] = image.data[2] * blue_value
                processed = True

            if processed:
                image.data = np.clip(image.data, 0, _16_BITS_MAX_VALUE)

        return image
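
A minimal standalone sketch of the channel-gain logic above; the gains and the image shape are assumptions, with channels indexed red, green, blue along axis 0 as in the snippet:

    import numpy as np

    _16_BITS_MAX_VALUE = 2**16 - 1

    data = np.random.randint(0, 2**16, size=(3, 8, 8)).astype(np.float32)
    gains = (1.2, 1.0, 0.8)                  # assumed red, green, blue factors

    for channel, gain in enumerate(gains):
        data[channel] = data[channel] * gain

    # keep values inside the 16-bit working range, as in process_image
    data = np.clip(data, 0, _16_BITS_MAX_VALUE)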