def astropy_fits(filename_in, filename_out):
    """
    Read an image file, display it in an OpenCV window (blocking until a key is
    pressed), and write the image unchanged to a new file.

    :param filename_in: Path of the input image file.
    :param filename_out: Path where the image is to be saved.
    :return: -
    """

    # Load the image and swap the channel order: OpenCV display expects BGR.
    image_rgb = Frames.read_image(filename_in)
    image_bgr = cvtColor(image_rgb, COLOR_RGB2BGR)

    # Show the image in a window and wait for an arbitrary key press.
    cv2.imshow('Example - Show image in window', image_bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # Convert back to RGB channel order and save the image as a color file.
    Frames.save_image(filename_out, cvtColor(image_bgr, COLOR_BGR2RGB), color=True)
            # NOTE(review): dangling fragment — this is the tail of
            # FrameViewer.keyPressEvent; the method header lies outside this chunk.
            self.set_original_scale()
        else:
            # Pass unhandled key events on to the base class implementation.
            super(FrameViewer, self).keyPressEvent(event)


class FrameViewerWidget(QtWidgets.QFrame, Ui_Frame):
    """Qt frame widget which embeds a FrameViewer showing a single image."""

    def __init__(self, input_image):
        """
        :param input_image: Image (as returned by Frames.read_image) to be displayed.
        """
        super(FrameViewerWidget, self).__init__()
        self.setupUi(self)

        # Create the viewer, give it a sunken panel look and a minimum size,
        # and load the input image into it.
        frame_viewer = FrameViewer()
        frame_viewer.setObjectName("frame_viewer")
        frame_viewer.setFrameShape(QtWidgets.QFrame.Panel)
        frame_viewer.setFrameShadow(QtWidgets.QFrame.Sunken)
        frame_viewer.setMinimumSize(600, 600)
        frame_viewer.setPhoto(input_image)

        # Insert the viewer into the widget's grid layout.
        self.gridLayout.addWidget(frame_viewer, 0, 0, 1, 1)


if __name__ == '__main__':
    # Stand-alone test: read a single image file and show it in a maximized viewer window.
    # The alternative (commented-out) file names select other test images.
    # input_file_name = "D:\SW-Development\Python\PlanetarySystemStacker\Examples\Jupiter_Richard\\2020-07-29-2145_3-L-Jupiter_ALTAIRGP224C_pss_gpp.png"
    input_file_name = "D:\SW-Development\Python\PlanetarySystemStacker\Examples\Jupiter_Richard\\" \
                      "2020-07-29-2145_3-L-Jupiter_ALTAIRGP224C_pss_p70_b48.png"
    # input_file_name = "D:\SW-Development\Python\PlanetarySystemStacker\Examples\Moon_2018-03-24\Moon_Tile-024_043939_pss_drizzle2_gpp.png"
    input_image = Frames.read_image(input_file_name)

    app = QtWidgets.QApplication(argv)
    window = FrameViewerWidget(input_image)
    window.showMaximized()
    app.exec_()
def execute_frames(self, input_name, input_type, convert_to_grayscale):
    """
    Prepare a new job: release state left over from the previous run, read the
    input frames (for stacking jobs) or the input image (for postprocessing jobs),
    and emit the signal which triggers the next workflow task.

    :param input_name: File or directory name of the job input.
    :param input_type: One of 'video' (frames extracted from a video file),
                       'image' (directory of single photographs), or 'postproc'
                       (a single image file to be postprocessed).
    :param convert_to_grayscale: If True, frames are converted to grayscale on
                                 input (passed through to the Frames constructor).
    :return: -
    """

    # If objects are left over from a previous run, release them. The instance
    # attributes themselves must be reset to None: merely deleting a loop
    # variable bound to them (as the previous version did with "del obj") leaves
    # the attributes' references alive, so the garbage collector could not free
    # anything.
    self.frames = None
    self.rank_frames = None
    self.align_frames = None
    self.alignment_points = None
    self.stack_frames = None

    # Force the garbage collector to release unreferenced objects.
    gc.collect()

    # Update the status bar in the main GUI.
    self.input_name = input_name
    self.set_status_bar_processing_phase("reading frames")

    # A job can either "stack" images or "postprocess" a single image. In the latter case,
    # input is a single image file.
    #
    # Images for stacking can either be extracted from a video file or a batch of single
    # photographs. In the first case, input_type is set to 'video', in the second case to
    # 'image'.
    if input_type == 'postproc':
        self.job_type = 'postproc'
        self.postproc_input_name = input_name

        # Reset the postprocessed image to None. This way, in saving the postprocessing
        # result, it can be checked if an image was computed in the workflow thread.
        self.postprocessed_image = None
        self.postprocessed_image_name = PostprocDataObject.set_file_name_processed(
            input_name, self.configuration.postproc_suffix,
            self.configuration.global_parameters_image_format)
        self.attached_log_name = splitext(input_name)[0] + '_postproc-log.txt'

    # For video file input, the Frames constructor expects the video file name for "names".
    elif input_type == 'video':
        self.job_type = 'stacking'
        names = input_name
        self.stacked_image_name = splitext(input_name)[0] + \
                                  self.configuration.stack_frames_suffix + '.' + \
                                  self.configuration.global_parameters_image_format
        self.attached_log_name = splitext(input_name)[0] + '_stacking-log.txt'

    # For single image input, the Frames constructor expects a list of image file names
    # for "names".
    else:  # input_type == 'image'
        self.job_type = 'stacking'
        names = [join(input_name, name) for name in listdir(input_name)]
        self.stacked_image_name = input_name + self.configuration.stack_frames_suffix + \
                                  '.' + self.configuration.global_parameters_image_format
        self.attached_log_name = input_name + '_stacking-log.txt'

    # Redirect stdout to a file if requested.
    if self.configuration.global_parameters_write_protocol_to_file != self.output_redirected:
        # Output currently redirected. Reset to stdout.
        if self.output_redirected:
            sys.stdout = self.stdout_saved
            self.output_redirected = False
        # Currently set to stdout, redirect to file now.
        else:
            try:
                self.stdout_saved = sys.stdout
                sys.stdout = open(self.configuration.protocol_filename, 'a+')
                self.output_redirected = True
            except IOError:
                # Best effort: if the protocol file cannot be opened, keep stdout.
                pass

    # Create logfile if requested to store the log with the stacked file.
    if self.attached_log_file:
        self.attached_log_file.close()
    if self.configuration.global_parameters_store_protocol_with_result:
        self.attached_log_file = open(self.attached_log_name, "w+")
    else:
        self.attached_log_file = None

    # Write a header to stdout and optionally to the logfile.
    if self.configuration.global_parameters_protocol_level > 0:
        decorator_line = (len(input_name) + 28) * "*"
        Miscellaneous.protocol(decorator_line, self.attached_log_file,
                               precede_with_timestamp=False)
        Miscellaneous.protocol("Start processing " + input_name, self.attached_log_file)
        Miscellaneous.protocol(decorator_line, self.attached_log_file,
                               precede_with_timestamp=False)

    # Initialize the timer object used to measure execution times of program sections.
    self.my_timer = timer()
    self.my_timer.create('Execution over all')

    if self.job_type == 'stacking':
        # Write parameters to the protocol.
        if self.configuration.global_parameters_protocol_level > 1:
            Miscellaneous.print_stacking_parameters(self.configuration,
                                                    self.attached_log_file)

        # Decide on the objects to be buffered, depending on configuration parameter.
        buffer_original, buffer_monochrome, buffer_gaussian, buffer_laplacian = \
            Frames.set_buffering(self.configuration.global_parameters_buffering_level)
        if self.configuration.global_parameters_protocol_level > 1:
            Miscellaneous.protocol(
                "+++ Buffering level is " +
                str(self.configuration.global_parameters_buffering_level) + " +++",
                self.attached_log_file)

        if buffer_original:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("+++ Start reading frames +++",
                                       self.attached_log_file)

        # Create the Frames object. Errors here (e.g. unreadable input) abort this
        # job and trigger the next one.
        try:
            self.frames = Frames(self.configuration, names, type=input_type,
                                 calibration=self.calibration,
                                 convert_to_grayscale=convert_to_grayscale,
                                 progress_signal=self.work_current_progress_signal,
                                 buffer_original=buffer_original,
                                 buffer_monochrome=buffer_monochrome,
                                 buffer_gaussian=buffer_gaussian,
                                 buffer_laplacian=buffer_laplacian)
            if self.configuration.global_parameters_protocol_level > 1:
                Miscellaneous.protocol(" Number of images: " + str(self.frames.number) +
                                       ", image shape: " + str(self.frames.shape),
                                       self.attached_log_file,
                                       precede_with_timestamp=False)

            # Report which dark / flat calibration (if any) is applied to the frames.
            if self.frames.calibration_matches:
                if self.calibration.master_dark_frame_adapted is not None and \
                        self.calibration.inverse_master_flat_frame is not None:
                    Miscellaneous.protocol(" Dark / flat frame calibration is active",
                                           self.attached_log_file,
                                           precede_with_timestamp=False)
                elif self.calibration.master_dark_frame_adapted is not None:
                    Miscellaneous.protocol(" Dark frame calibration is active",
                                           self.attached_log_file,
                                           precede_with_timestamp=False)
                elif self.calibration.inverse_master_flat_frame is not None:
                    Miscellaneous.protocol(" Flat frame calibration is active",
                                           self.attached_log_file,
                                           precede_with_timestamp=False)
            else:
                Miscellaneous.protocol(" No matching master dark / flat frames found, "
                                       "calibration de-activated", self.attached_log_file,
                                       precede_with_timestamp=False)
        except Error as e:
            # NOTE(review): "e.message" assumes the project's Error class defines a
            # "message" attribute — confirm against its definition.
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("Error: " + e.message + ", continue with next job\n",
                                       self.attached_log_file)
            self.work_next_task_signal.emit("Next job")
            return
        except Exception as e:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("Error: " + str(e) + ", continue with next job\n",
                                       self.attached_log_file)
            self.work_next_task_signal.emit("Next job")
            return

        # Look up the available RAM (without paging).
        virtual_memory = dict(psutil.virtual_memory()._asdict())
        available_ram = virtual_memory['available'] / 1e9

        # Compute the approximate RAM usage of this job at the selected buffering level.
        needed_ram = self.frames.compute_required_buffer_size(
            self.configuration.global_parameters_buffering_level)
        if self.configuration.global_parameters_protocol_level > 1:
            Miscellaneous.protocol(" RAM required (Gbytes): " + str(needed_ram) +
                                   ", available: " + str(available_ram),
                                   self.attached_log_file, precede_with_timestamp=False)

        # If the required RAM is not available, test if lowering the buffering level
        # would help.
        if needed_ram > available_ram:
            recommended_level = None
            for level in range(self.configuration.global_parameters_buffering_level - 1,
                               -1, -1):
                alternative_ram = self.frames.compute_required_buffer_size(level)
                if alternative_ram < available_ram:
                    recommended_level = level
                    break

            # If an appropriate level was found, write it as a recommendation to the
            # protocol.
            if self.configuration.global_parameters_protocol_level > 0:
                if recommended_level is not None:
                    Miscellaneous.protocol(
                        "Error: Too little RAM for chosen buffering level,"
                        " recommended level: " + str(recommended_level) +
                        ", continuing with next job\n", self.attached_log_file)
                else:
                    Miscellaneous.protocol("Error: Too little RAM for this job, "
                                           "continuing with the next one\n",
                                           self.attached_log_file)

            # Continue with the next job.
            self.work_next_task_signal.emit("Next job")
            return

        # The RAM seems to be sufficient, continue with ranking frames.
        self.work_next_task_signal.emit("Rank frames")

    # Job type is 'postproc'.
    else:
        try:
            self.postproc_input_image = Frames.read_image(self.postproc_input_name)
        except Error as e:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("Error: " + e.message + ", continue with next job\n",
                                       self.attached_log_file)
            self.work_next_task_signal.emit("Next job")
            return
        except Exception as e:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("Error: " + str(e) + ", continue with next job\n",
                                       self.attached_log_file)
            self.work_next_task_signal.emit("Next job")
            return

        # Convert 8 bit to 16 bit.
        if self.postproc_input_image.dtype == uint8:
            self.postproc_input_image = self.postproc_input_image.astype(uint16) * 256

        self.work_next_task_signal.emit("Postprocessing")