def _read_fit_image(path: Path):
    """
    Read a FIT image from the filesystem.

    :param path: path to image file to load from
    :type path: pathlib.Path

    :return: the loaded image, with data and headers parsed, or None if a known error occurred
    :rtype: Image or None
    """
    resolved_path = str(path.resolve())
    try:
        with fits.open(resolved_path) as fit:
            # pylint: disable=E1101
            primary_hdu = fit[0]
            image = Image(primary_hdu.data)
            header = primary_hdu.header
            # keep the bayer pattern around so the image can be debayered later
            if 'BAYERPAT' in header:
                image.bayer_pattern = header['BAYERPAT']
            _set_image_file_origin(image, path)
            return image
    except OSError as error:
        _report_fs_error(path, error)
        return None
def process_image(self, image: Image):
    """
    Debayers a color image according to its bayer pattern, when needed.

    :param image: the image to process
    :type image: Image

    :return: the processed image
    :rtype: Image

    :raises ProcessingError: when the image's bayer pattern is not supported
    """
    if image.needs_debayering():
        bayer_pattern = image.bayer_pattern
        cv2_debayer_dict = {
            "BG": cv2.COLOR_BAYER_BG2RGB,
            "GB": cv2.COLOR_BAYER_GB2RGB,
            "RG": cv2.COLOR_BAYER_RG2RGB,
            "GR": cv2.COLOR_BAYER_GR2RGB
        }
        # the conversion key is built from the last 2 pattern letters, reversed
        cv_debay = bayer_pattern[3] + bayer_pattern[2]

        # ugly temp fix for GBRG CFA patterns poorly handled by openCV
        if cv_debay == "GR":
            cv_debay = "BG"

        try:
            debayered_data = cv2.cvtColor(image.data, cv2_debayer_dict[cv_debay])
        except KeyError as key_error:
            # chain the original lookup failure so the root cause is not lost
            raise ProcessingError(
                f"unsupported bayer pattern : {bayer_pattern}") from key_error
        image.data = debayered_data
    return image
def process_image(self, image: Image):
    """
    Convert image data to float32; color images first get their color
    axis moved to position 0.

    :param image: the image to process
    :type image: Image

    :return: the processed image
    :rtype: Image
    """
    if image.is_color():
        image.set_color_axis_as(0)
    converted_data = np.float32(image.data)
    image.data = converted_data
    return image
def process_image(self, image: Image):
    """
    Convert image data back to uint16, clipped to the 16-bit range;
    color images first get their color axis moved to position 2.

    :param image: the image to process
    :type image: Image

    :return: the processed image
    :rtype: Image
    """
    if image.is_color():
        image.set_color_axis_as(2)
    clipped_data = np.clip(image.data, 0, 2 ** 16 - 1)
    image.data = np.uint16(clipped_data)
    return image
def process_image(self, image: Image):
    """
    Applies autostretch to image data when the 'active' parameter is set.

    Parameters used, in declaration order : active flag, stretch method name,
    stretch strength.

    :param image: the image to process
    :type image: Image

    :return: the processed image
    :rtype: Image
    """
    for param in self._parameters:
        _LOGGER.debug(f"Autostretch param {param.name} = {param.value}")

    active = self._parameters[0]
    stretch_method = self._parameters[1]
    stretch_strength = self._parameters[2]

    if active.value:
        _LOGGER.debug("Performing Autostretch...")

        # remap data to the full 16-bits range before stretching
        image.data = np.interp(image.data,
                               (image.data.min(), image.data.max()),
                               (0, _16_BITS_MAX_VALUE))

        @log
        def histo_adpative_equalization(data):
            # special case for autostretch value == 0
            strength = stretch_strength.value if stretch_strength.value != 0 else 0.1
            return exposure.equalize_adapthist(np.uint16(data),
                                               nbins=_16_BITS_MAX_VALUE + 1,
                                               clip_limit=.01 * strength)

        @log
        def contrast_stretching(data):
            # strength is used as the percentile cutoff, symmetrically on both ends
            low, high = np.percentile(data,
                                      (stretch_strength.value, 100 - stretch_strength.value))
            return exposure.rescale_intensity(data, in_range=(low, high))

        available_stretches = [contrast_stretching, histo_adpative_equalization]

        # the selected method's value is looked up among its declared choices
        chosen_stretch = available_stretches[stretch_method.choices.index(stretch_method.value)]

        if image.is_color():
            # color images are stretched channel by channel
            for channel in range(3):
                image.data[channel] = chosen_stretch(image.data[channel])
        else:
            image.data = chosen_stretch(image.data)
        _LOGGER.debug("Autostretch Done")

        # autostretch output range is [0, 1]
        # so we remap values to our range [0, Levels._UPPER_LIMIT]
        image.data *= _16_BITS_MAX_VALUE

    # final interpolation
    image.data = np.float32(np.interp(image.data,
                                      (image.data.min(), image.data.max()),
                                      (0, _16_BITS_MAX_VALUE)))

    return image
def _apply_transformation(self, image: Image, transformation: SimilarityTransform):
    """
    Apply a transformation to an image.

    If image is color, channels are processed using multiprocessing, allowing global operation
    to take less time on a multi core CPU

    Image is modified in place by this function

    :param image: the image to apply transformation to
    :type image: Image
    :param transformation: the transformation to apply
    :type transformation: skimage.transform._geometric.SimilarityTransform
    """
    if image.is_color():
        # fix : these constant log messages carried useless f-string prefixes
        _LOGGER.debug("Aligning color image...")
        manager = Manager()
        results_dict = manager.dict()
        channel_processors = []
        for channel in range(3):
            processor = Process(
                target=Stacker._apply_single_channel_transformation,
                args=[
                    image, self._last_stacking_result, transformation,
                    results_dict, channel
                ])
            processor.start()
            channel_processors.append(processor)
        for processor in channel_processors:
            processor.join()
        _LOGGER.debug(
            "Color channel processes are done. Fetching results and storing results..."
        )
        for channel, data in results_dict.items():
            image.data[channel] = data
        _LOGGER.debug("Aligning color image DONE")
    else:
        _LOGGER.debug("Aligning b&w image...")
        # single channel : no need for a multiprocessing manager, a plain dict will do
        result_dict = dict()
        Stacker._apply_single_channel_transformation(
            image, self._last_stacking_result, transformation, result_dict)
        image.data = result_dict[0]
        _LOGGER.debug("Aligning b&w image : DONE")
def on_new_stack_result(self, image: Image):
    """
    A new image has been stacked

    :param image: the result of the stack
    :type image: Image
    """
    image.origin = "Stacking result"
    stack_snapshot = image.clone()
    self._last_stacking_result = stack_snapshot
    self.purge_queue(self._post_process_queue)
    self._post_process_queue.put(image.clone())
def _save_image_as_jpg(image: Image, target_path: str):
    """
    Saves image as jpg.

    :param image: the image to save
    :type image: Image
    :param target_path: the absolute path of the image file to save to
    :type target_path: str

    :return: a tuple with 2 values :

      - True if save succeeded, False otherwise
      - Details on cause of save failure, if occurs

    As we are using cv2.imwrite, we won't get any details on failures. So failure details will always be
    the empty string.
    """
    # here we are sure that image data type is unsigned 16 bits. We need to downscale to 8 bits
    scale_factor = ((2 ** 16) - 1) / ((2 ** 8) - 1)
    image.data = (image.data / scale_factor).astype('uint8')
    bgr_data = cv2.cvtColor(image.data, cv2.COLOR_RGB2BGR)
    write_ok = cv2.imwrite(target_path, bgr_data, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
    return write_ok, ''
def save_image(self, image: Image, file_extension: str, dest_folder_path: str,
               filename_base: str, add_timestamp: bool = False):
    """
    Save an image to disk.

    :param image: the image to save
    :type image: Image
    :param file_extension: The image save file format extension
    :type file_extension: str
    :param dest_folder_path: The path of the folder image will be saved to
    :type dest_folder_path: str
    :param filename_base: The name of the file to save to (without extension)
    :type filename_base: str
    :param add_timestamp: Do we add a timestamp to image name
    :type add_timestamp: bool
    """
    # fix : removed a useless self-assignment of filename_base
    if add_timestamp:
        filename_base += '-' + Controller.get_timestamp()

    # work on a clone so the caller's image is left untouched
    image_to_save = image.clone()
    image_to_save.destination = dest_folder_path + "/" + filename_base + '.' + file_extension
    self._saver_queue.put(image_to_save)
def process_image(self, image: Image):
    # pylint: disable=R0914
    """
    Performs levels adjustments : midtones correction, black / white point
    clipping, then a final rescale of data to the full 16-bits range.

    Parameters used, in declaration order : active flag, black level,
    midtones level, white level.

    :param image: the image to process
    :type image: Image

    :return: the processed image
    :rtype: Image
    """
    active, black, midtones, white = self._parameters[:4]

    for param in self._parameters:
        _LOGGER.debug(f"Levels param {param.name} = {param.value}")

    if active.value:
        # midtones correction
        do_midtones = not midtones.is_default()
        _LOGGER.debug(f"Levels : do midtones adjustments : {do_midtones}")

        if do_midtones:
            _LOGGER.debug("Performing midtones adjustments...")
            # guard against a zero midtones value
            midtones_value = midtones.value if midtones.value > 0 else 0.1
            exponent = 1 / midtones_value
            image.data = _16_BITS_MAX_VALUE * image.data ** exponent / _16_BITS_MAX_VALUE ** exponent
            _LOGGER.debug("Midtones level adjustments Done")

        # black / white levels
        do_black_white_levels = not black.is_default() or not white.is_default()
        _LOGGER.debug(
            f"Levels : do black and white adjustments : {do_black_white_levels}"
        )

        if do_black_white_levels:
            _LOGGER.debug("Performing black / white level adjustments...")
            image.data = np.clip(image.data, black.value, white.value)
            _LOGGER.debug("Black / white level adjustments Done")

    # final interpolation
    image.data = np.float32(np.interp(image.data,
                                      (image.data.min(), image.data.max()),
                                      (0, _16_BITS_MAX_VALUE)))

    return image
def _stack_image(self, image: Image):
    """
    Compute stacking according to user defined stacking mode
    the image data is modified in place by this function

    :param image: the image to be stacked
    :type image: Image

    :raises StackingError: when the configured stacking mode is unknown
    """
    mode = self._stacking_mode
    _LOGGER.debug(f"Stacking in {mode} mode...")

    if mode == STACKING_MODE_SUM:
        image.data = image.data + self._last_stacking_result.data
    elif mode == STACKING_MODE_MEAN:
        # running mean : weight previous result by current stack size
        image.data = (self.size * self._last_stacking_result.data +
                      image.data) / (self.size + 1)
    else:
        raise StackingError(f"Unsupported stacking mode : {mode}")

    _LOGGER.debug(f"Stacking in {mode} done.")
def on_new_post_processor_result(self, image: Image):
    """
    A new image processing result is here

    :param image: the new processing result
    :type image: Image
    """
    image.origin = "Process result"
    # recompute display histograms for the new result
    DYNAMIC_DATA.histogram_container = compute_histograms_for_display(
        image, Controller._BIN_COUNT)
    DYNAMIC_DATA.post_processor_result = image
    # NOTE(review): image_only=True presumably limits observers to an image refresh — confirm
    self._notify_model_observers(image_only=True)
    self.save_post_process_result()
def process_image(self, image: Image):
    """
    Performs RGB balance

    :param image: the image to process
    :type image: Image
    """
    for param in self._parameters:
        _LOGGER.debug(f"Color balance param {param.name} = {param.value}")

    active, red, green, blue = self._parameters[:4]

    if active.value:
        processed = False

        # apply each non-default channel factor, flooring factors at 0.1
        # so a zero value cannot wipe the channel
        for channel, param in enumerate((red, green, blue)):
            if not param.is_default():
                factor = param.value if param.value > 0 else 0.1
                image.data[channel] = image.data[channel] * factor
                processed = True

        if processed:
            image.data = np.clip(image.data, 0, _16_BITS_MAX_VALUE)

    return image
def _handle_image(self, image: Image):
    """
    Handles an incoming image.

    The first image of a stack is published right away and becomes the alignment
    reference. Subsequent images are shape-checked, optionally aligned, stacked,
    then published. Any StackingError along the way gets the image discarded
    with a warning.

    :param image: the image to handle
    :type image: Image
    """
    if self.size == 0:
        _LOGGER.debug(
            "This is the first image for this stack. Publishing right away"
        )
        self._publish_stacking_result(image)
        self._align_reference = image
    else:
        try:
            if not image.is_same_shape_as(self._last_stacking_result):
                raise StackingError(
                    "Image dimensions or color don't match stack content. "
                    f"New image shape : {image.data.shape} <=> "
                    f"Reference shape : {self._last_stacking_result.data.shape}"
                )
            try:
                if self._align_before_stack:
                    # alignment is a memory greedy process, we take special care of such errors
                    try:
                        self._align_image(image)
                    except OSError as os_error:
                        raise StackingError(os_error)
                self._stack_image(image)
            except AttributeError:
                # reference images (stacking result / align reference) are unset
                raise StackingError("Our reference images are gone.")
            self._publish_stacking_result(image)
        except StackingError as stacking_error:
            # the offending image is simply discarded
            _LOGGER.warning(
                f"Could not stack image {image.origin} : {stacking_error}. Image is DISCARDED"
            )
def _set_image_file_origin(image: Image, path: Path): image.origin = f"FILE : {str(path.resolve())}"
def _read_raw_image(path: Path):
    """
    Reads a RAW DLSR image from file

    :param path: path to the file to read from
    :type path: pathlib.Path

    :return: the image or None if a known error occurred
    :rtype: Image or None
    """
    try:
        with imread(str(path.resolve())) as raw_image:
            # We store the bayer pattern as it would be advertised in a FITS header.
            #
            # RawPy describes the sensor's bayer pattern with 2 values :
            #
            # 1) raw_image.raw_pattern : a 2x2 numpy array of indices. For the most
            #    common DSLR pattern (RGGB), it is [[0, 1], [3, 2]]
            #
            # 2) raw_image.color_desc : a bytes literal giving the color for each
            #    index, in ascending index order. For the same example : b'RGBG'
            #
            # To get the FITS-style pattern string, we read color_desc characters
            # in the order given by the flattened raw_pattern, i.e. as if
            # raw_pattern had been [[0, 1], [2, 3]].
            bayer_pattern_indices = raw_image.raw_pattern.flatten()
            bayer_pattern_desc = raw_image.color_desc.decode()

            _LOGGER.debug(f"Bayer pattern indices = {bayer_pattern_indices}")
            _LOGGER.debug(f"Bayer pattern description = {bayer_pattern_desc}")

            # sanity checks : one color letter per index, indices within bounds
            assert len(bayer_pattern_indices) == len(bayer_pattern_desc)
            pattern_chars = []
            for index in bayer_pattern_indices:
                assert index < len(bayer_pattern_indices)
                pattern_chars.append(bayer_pattern_desc[index])
            bayer_pattern = "".join(pattern_chars)

            _LOGGER.debug(
                f"Computed, FITS-compatible bayer pattern = {bayer_pattern}")

            new_image = Image(raw_image.raw_image_visible.copy())
            new_image.bayer_pattern = bayer_pattern
            _set_image_file_origin(new_image, path)
            return new_image
    except (LibRawNonFatalError, LibRawFatalError) as raw_error:
        _report_fs_error(path, raw_error)
        return None
def _find_transformation(self, image: Image):
    """
    Iteratively try and find a valid transformation to align image with stored align reference.

    We perform 3 tries with growing image sizes of a centered image subset : 10%, 33% and 100%
    of image size (docstring fixed : the second ratio is .33, not 30%)

    :param image: the image to be aligned
    :type image: Image

    :return: the found transformation
    :rtype: skimage.transform._geometric.SimilarityTransform

    :raises StackingError: when no transformation is found using the whole image
    """
    for ratio in [.1, .33, 1.]:
        top, bottom, left, right = self._get_image_subset_boundaries(ratio)

        # pick green channel if image has color
        if image.is_color():
            new_subset = image.data[1][top:bottom, left:right]
            ref_subset = self._align_reference.data[1][top:bottom, left:right]
        else:
            new_subset = image.data[top:bottom, left:right]
            ref_subset = self._align_reference.data[top:bottom, left:right]

        try:
            _LOGGER.debug(
                f"Searching valid transformation on subset "
                f"with ratio:{ratio} and shape: {new_subset.shape}")

            transformation, matches = al.find_transform(new_subset, ref_subset)

            _LOGGER.debug(f"Found transformation with subset ratio = {ratio}")
            _LOGGER.debug(f"rotation : {transformation.rotation}")
            _LOGGER.debug(f"translation : {transformation.translation}")
            _LOGGER.debug(f"scale : {transformation.scale}")

            matches_count = len(matches[0])
            _LOGGER.debug(f"image matched features count : {matches_count}")

            # a transformation built on too few matched features is discarded ;
            # raising here routes us to the retry logic in the except clause below
            if matches_count < _MINIMUM_MATCHES_FOR_VALID_TRANSFORM:
                _LOGGER.debug(
                    f"Found transformation but matches count is too low : "
                    f"{matches_count} < {_MINIMUM_MATCHES_FOR_VALID_TRANSFORM}. "
                    "Discarding transformation")
                raise StackingError("Too few matches")

            return transformation

        # pylint: disable=W0703
        except Exception as alignment_error:
            # we have no choice but catching Exception, here. That's what AstroAlign raises in some cases
            # this will catch MaxIterError as well...
            if ratio == 1.:
                # last try failed : chain the original error so the root cause is kept
                raise StackingError(alignment_error) from alignment_error
            _LOGGER.debug(
                f"Could not find valid transformation on subset with ratio = {ratio}."
            )
            continue