Example #1
    def process_one_group(self, data_model: DataModel,
                          descriptor_list: [FileDescriptor],
                          output_directory: str, combine_method: int,
                          disposition_folder_name: str, console: Console):
        """
        Process one group of files, output to the given directory
        Exceptions thrown:
            NotAllFlatFrames        The given files are not all flat frames
            IncompatibleSizes       The given files are not all the same dimensions

        :param data_model:                  Data model giving options for current run
        :param descriptor_list:             List of all the files in one group, for processing
        :param output_directory:            Path to directory to receive the output file
        :param combine_method:              Code saying how these files should be combined
        :param disposition_folder_name:     If files are to be moved after processing, name of the receiving folder
        :param console:                     Redirectable console output object
        """
        assert len(descriptor_list) > 0
        sample_file: FileDescriptor = descriptor_list[0]
        console.push_level()
        self.describe_group(data_model, len(descriptor_list), sample_file,
                            console)

        # Make up a file name for this group's output, into the given directory
        file_name = SharedUtils.get_file_name_portion(
            combine_method, sample_file, data_model.get_sigma_clip_threshold(),
            data_model.get_min_max_number_clipped_per_end())
        output_file = f"{output_directory}/{file_name}"

        # Confirm that these are all flat frames, and can be combined (same binning and dimensions)
        if self.all_compatible_sizes(descriptor_list):
            if data_model.get_ignore_file_type() \
                    or FileCombiner.all_of_type(descriptor_list, FileDescriptor.FILE_TYPE_FLAT):
                # Get the filter name to record in the output FITS metadata.
                # All the files should use the same filter, but in case there are
                # stragglers, take the most common filter in the set.
                filter_name = SharedUtils.most_common_filter_name(
                    descriptor_list)

                # Do the combination
                self.combine_files(descriptor_list, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    disposition_folder_name, descriptor_list, console)
                self.check_cancellation()
            else:
                raise MasterMakerExceptions.NotAllFlatFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes

        console.pop_level()
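
A minimal caller sketch for the routine above, showing how the two documented exceptions might be handled. The names combiner (a FileCombiner instance) and groups (a list of descriptor lists) are assumptions for illustration, not names taken from the source.

    # Hypothetical caller; `combiner` and `groups` are assumed names, not from the source
    for group in groups:
        try:
            combiner.process_one_group(data_model, group, output_directory,
                                       combine_method, disposition_folder_name,
                                       console)
        except MasterMakerExceptions.NotAllFlatFrames:
            print("Group skipped: not all files are flat frames")
        except MasterMakerExceptions.IncompatibleSizes:
            print("Group skipped: files differ in dimensions or binning")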
Example #2
    def process_one_group(self, data_model: DataModel,
                          descriptor_list: [FileDescriptor],
                          output_directory: str, combine_method: int,
                          disposition_folder_name: str, console: Console):
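        """
        Process one group of files, output to the given directory
        Exceptions thrown:
            NotAllDarkFrames        The given files are not all dark frames
            IncompatibleSizes       The given files are not all the same dimensions

        :param data_model:                  Data model giving options for current run
        :param descriptor_list:             List of all the files in one group, for processing
        :param output_directory:            Path to directory to receive the output file
        :param combine_method:              Code saying how these files should be combined
        :param disposition_folder_name:     If files are to be moved after processing, name of the receiving folder
        :param console:                     Redirectable console output object
        """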
        assert len(descriptor_list) > 0
        sample_file: FileDescriptor = descriptor_list[0]
        console.push_level()
        self.describe_group(data_model, len(descriptor_list), sample_file,
                            console)

        # Make up a file name for this group's output, into the given directory
        file_name = SharedUtils.get_file_name_portion(
            combine_method, sample_file, data_model.get_sigma_clip_threshold(),
            data_model.get_min_max_number_clipped_per_end())
        output_file = f"{output_directory}/{file_name}"

        # Confirm that these are all dark frames, and can be combined (same binning and dimensions)
        if self.all_compatible_sizes(descriptor_list):
            if data_model.get_ignore_file_type() \
                    or FileCombiner.all_of_type(descriptor_list, FileDescriptor.FILE_TYPE_DARK):
                # Get (most common) filter name in the set
                # Since these are darks, the filter is meaningless, but we need the value
                # for the shared "create file" routine
                filter_name = SharedUtils.most_common_filter_name(
                    descriptor_list)

                # Do the combination
                self.combine_files(descriptor_list, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    disposition_folder_name, descriptor_list, console)
                self.check_cancellation()
            else:
                raise MasterMakerExceptions.NotAllDarkFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes
        console.pop_level()
Example #3
    def combine_files(self, input_files: [FileDescriptor],
                      data_model: DataModel, filter_name: str,
                      output_path: str, console: Console):
        """
        Combine the given files, writing the result to the given output file using the
        combination method defined in the data model.

        :param input_files:     List of files to be combined
        :param data_model:      Data model with options for this run
        :param filter_name:     Human-readable filter name (for output file name and FITS comment)
        :param output_path:     Path for output file to be created
        :param console:         Redirectable console output object
        """
        console.push_level()  # Stack console indentation level to easily restore when done
        substituted_file_name = SharedUtils.substitute_date_time_filter_in_string(
            output_path)
        file_names = [d.get_absolute_path() for d in input_files]
        combine_method = data_model.get_master_combine_method()
        # Get info about any precalibration that is to be done
        calibrator = Calibrator(data_model)
        calibration_tag = calibrator.fits_comment_tag()
        assert len(input_files) > 0
        binning: int = input_files[0].get_binning()
        mean_exposure, mean_temperature = \
            ImageMath.mean_exposure_and_temperature(input_files)
        if combine_method == Constants.COMBINE_MEAN:
            mean_data = ImageMath.combine_mean(file_names, calibrator, console,
                                               self._session_controller)
            self.check_cancellation()
            RmFitsUtil.create_combined_fits_file(
                substituted_file_name, mean_data,
                FileDescriptor.FILE_TYPE_FLAT, "Flat Frame", mean_exposure,
                mean_temperature, filter_name, binning,
                f"Master Flat MEAN combined {calibration_tag}")
        elif combine_method == Constants.COMBINE_MEDIAN:
            median_data = ImageMath.combine_median(file_names, calibrator,
                                                   console,
                                                   self._session_controller)
            self.check_cancellation()
            RmFitsUtil.create_combined_fits_file(
                substituted_file_name, median_data,
                FileDescriptor.FILE_TYPE_FLAT, "Flat Frame", mean_exposure,
                mean_temperature, filter_name, binning,
                f"Master Flat MEDIAN combined {calibration_tag}")
        elif combine_method == Constants.COMBINE_MINMAX:
            number_dropped_points = data_model.get_min_max_number_clipped_per_end()
            min_max_clipped_mean = ImageMath.combine_min_max_clip(
                file_names, number_dropped_points, calibrator, console,
                self._session_controller)
            self.check_cancellation()
            assert min_max_clipped_mean is not None
            RmFitsUtil.create_combined_fits_file(
                substituted_file_name, min_max_clipped_mean,
                FileDescriptor.FILE_TYPE_FLAT, "Flat Frame", mean_exposure,
                mean_temperature, filter_name, binning,
                f"Master Flat Min/Max Clipped "
                f"(drop {number_dropped_points}) Mean combined"
                f" {calibration_tag}")
        else:
            assert combine_method == Constants.COMBINE_SIGMA_CLIP
            sigma_threshold = data_model.get_sigma_clip_threshold()
            sigma_clipped_mean = ImageMath.combine_sigma_clip(
                file_names, sigma_threshold, calibrator, console,
                self._session_controller)
            self.check_cancellation()
            assert sigma_clipped_mean is not None
            RmFitsUtil.create_combined_fits_file(
                substituted_file_name, sigma_clipped_mean,
                FileDescriptor.FILE_TYPE_FLAT, "Flat Frame", mean_exposure,
                mean_temperature, filter_name, binning,
                f"Master Flat Sigma Clipped "
                f"(threshold {sigma_threshold}) Mean combined"
                f" {calibration_tag}")
        console.pop_level()
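
The min/max-clipped branch above delegates the per-pixel arithmetic to ImageMath.combine_min_max_clip. A rough sketch of the underlying idea only (not the project's implementation), assuming the calibrated frames have already been stacked into a single NumPy array with one layer per file:

    import numpy as np

    def min_max_clipped_mean(stack: np.ndarray, drop_per_end: int) -> np.ndarray:
        """Per-pixel mean after dropping the `drop_per_end` lowest and highest
        values along axis 0 (one layer per input file)."""
        assert stack.shape[0] > 2 * drop_per_end
        sorted_stack = np.sort(stack, axis=0)  # sort each pixel's values across files
        kept = sorted_stack[drop_per_end:stack.shape[0] - drop_per_end]
        return kept.mean(axis=0)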
Example #4
    def __init__(self, preferences: Preferences, data_model: DataModel):
        """Initialize MainWindow class"""
        self._preferences = preferences
        self._data_model = data_model
        QMainWindow.__init__(self)
        self.ui = uic.loadUi(
            MultiOsUtil.path_for_file_in_program_directory("MainWindow.ui"))
        self._field_validity: {object, bool} = {}
        self._table_model: FitsFileTableModel
        self._indent_level = 0

        # Load combine algorithm setting from the data model

        algorithm = data_model.get_master_combine_method()
        if algorithm == Constants.COMBINE_MEAN:
            self.ui.combineMeanRB.setChecked(True)
        elif algorithm == Constants.COMBINE_MEDIAN:
            self.ui.combineMedianRB.setChecked(True)
        elif algorithm == Constants.COMBINE_MINMAX:
            self.ui.combineMinMaxRB.setChecked(True)
        else:
            assert (algorithm == Constants.COMBINE_SIGMA_CLIP)
            self.ui.combineSigmaRB.setChecked(True)

        self.ui.minMaxNumDropped.setText(
            str(data_model.get_min_max_number_clipped_per_end()))
        self.ui.sigmaThreshold.setText(
            str(data_model.get_sigma_clip_threshold()))

        # Load input-file disposition setting from the data model

        disposition = data_model.get_input_file_disposition()
        if disposition == Constants.INPUT_DISPOSITION_SUBFOLDER:
            self.ui.dispositionSubFolderRB.setChecked(True)
        else:
            assert (disposition == Constants.INPUT_DISPOSITION_NOTHING)
            self.ui.dispositionNothingRB.setChecked(True)
        self.ui.subFolderName.setText(
            data_model.get_disposition_subfolder_name())

        # Pre-calibration options

        precalibration_option = data_model.get_precalibration_type()
        if precalibration_option == Constants.CALIBRATION_FIXED_FILE:
            self.ui.fixedPreCalFileRB.setChecked(True)
        elif precalibration_option == Constants.CALIBRATION_NONE:
            self.ui.noPreClalibrationRB.setChecked(True)
        elif precalibration_option == Constants.CALIBRATION_AUTO_DIRECTORY:
            self.ui.autoPreCalibrationRB.setChecked(True)
        else:
            assert precalibration_option == Constants.CALIBRATION_PEDESTAL
            self.ui.fixedPedestalRB.setChecked(True)
        self.ui.fixedPedestalAmount.setText(
            str(data_model.get_precalibration_pedestal()))
        self.ui.precalibrationPathDisplay.setText(
            os.path.basename(data_model.get_precalibration_fixed_path()))
        self.ui.autoDirectoryName.setText(
            os.path.basename(data_model.get_precalibration_auto_directory()))

        self.ui.autoRecursive.setChecked(
            data_model.get_auto_directory_recursive())
        self.ui.autoBiasOnly.setChecked(
            data_model.get_auto_directory_bias_only())

        # Grouping boxes and parameters

        self.ui.groupBySizeCB.setChecked(data_model.get_group_by_size())
        self.ui.groupByExposureCB.setChecked(
            data_model.get_group_by_exposure())
        self.ui.groupByTemperatureCB.setChecked(
            data_model.get_group_by_temperature())
        self.ui.ignoreSmallGroupsCB.setChecked(
            data_model.get_ignore_groups_fewer_than())

        self.ui.exposureGroupBandwidth.setText(
            f"{data_model.get_exposure_group_bandwidth()}")
        self.ui.temperatureGroupBandwidth.setText(
            f"{data_model.get_temperature_group_bandwidth()}")
        self.ui.minimumGroupSize.setText(
            str(data_model.get_minimum_group_size()))

        # Set up the file table
        self._table_model = FitsFileTableModel(
            self.ui.filesTable, data_model.get_ignore_file_type())
        self.ui.filesTable.setModel(self._table_model)
        # Columns should resize to best fit their contents
        self.ui.filesTable.horizontalHeader().setSectionResizeMode(
            QHeaderView.ResizeToContents)

        # Write a summary, in the main tab, of the settings from the options tab (and data model)
        self.fill_options_readout()

        self.connect_responders()

        # If a window size is saved, set the window size
        window_size = self._preferences.get_main_window_size()
        if window_size is not None:
            self.ui.resize(window_size)

        self.enable_fields()
        self.enable_buttons()
Example #5
 def combine_files(self, input_files: [FileDescriptor],
                   data_model: DataModel, filter_name: str,
                   output_path: str, console: Console):
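     """
     Combine the given files, writing the result to the given output file using the
     combination method defined in the data model.

     :param input_files:     List of files to be combined
     :param data_model:      Data model with options for this run
     :param filter_name:     Human-readable filter name (for output file name and FITS comment)
     :param output_path:     Path for output file to be created
     :param console:         Redirectable console output object
     """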
     console.push_level()
     substituted_file_name = SharedUtils.substitute_date_time_filter_in_string(
         output_path)
     file_names = [d.get_absolute_path() for d in input_files]
     combine_method = data_model.get_master_combine_method()
     # Get info about any precalibration that is to be done
     calibrator = Calibrator(data_model)
     calibration_tag = calibrator.fits_comment_tag()
     assert len(input_files) > 0
     binning: int = input_files[0].get_binning()
     mean_exposure, mean_temperature = \
         ImageMath.mean_exposure_and_temperature(input_files)
     if combine_method == Constants.COMBINE_MEAN:
         mean_data = ImageMath.combine_mean(file_names, calibrator, console,
                                            self._session_controller)
         self.check_cancellation()
         RmFitsUtil.create_combined_fits_file(
             substituted_file_name, mean_data,
             FileDescriptor.FILE_TYPE_DARK, "Dark Frame", mean_exposure,
             mean_temperature, filter_name, binning,
             f"Master Dark MEAN combined {calibration_tag}")
     elif combine_method == Constants.COMBINE_MEDIAN:
         median_data = ImageMath.combine_median(file_names, calibrator,
                                                console,
                                                self._session_controller)
         self.check_cancellation()
         RmFitsUtil.create_combined_fits_file(
             substituted_file_name, median_data,
             FileDescriptor.FILE_TYPE_DARK, "Dark Frame", mean_exposure,
             mean_temperature, filter_name, binning,
             f"Master Dark MEDIAN combined {calibration_tag}")
     elif combine_method == Constants.COMBINE_MINMAX:
         number_dropped_points = data_model.get_min_max_number_clipped_per_end()
         min_max_clipped_mean = ImageMath.combine_min_max_clip(
             file_names, number_dropped_points, calibrator, console,
             self._session_controller)
         self.check_cancellation()
         assert min_max_clipped_mean is not None
         RmFitsUtil.create_combined_fits_file(
             substituted_file_name, min_max_clipped_mean,
             FileDescriptor.FILE_TYPE_DARK, "Dark Frame", mean_exposure,
             mean_temperature, filter_name, binning,
             f"Master Dark Min/Max Clipped "
             f"(drop {number_dropped_points}) Mean combined"
             f" {calibration_tag}")
     else:
         assert combine_method == Constants.COMBINE_SIGMA_CLIP
         sigma_threshold = data_model.get_sigma_clip_threshold()
         sigma_clipped_mean = ImageMath.combine_sigma_clip(
             file_names, sigma_threshold, calibrator, console,
             self._session_controller)
         self.check_cancellation()
         assert sigma_clipped_mean is not None
         RmFitsUtil.create_combined_fits_file(
             substituted_file_name, sigma_clipped_mean,
             FileDescriptor.FILE_TYPE_DARK, "Dark Frame", mean_exposure,
             mean_temperature, filter_name, binning,
             f"Master Dark Sigma Clipped "
             f"(threshold {sigma_threshold}) Mean combined"
             f" {calibration_tag}")
     console.pop_level()
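
The sigma-clip branch likewise delegates to ImageMath.combine_sigma_clip. A rough single-pass sketch of the technique (again, not the project's implementation), assuming as in the earlier sketch that the calibrated frames are stacked into a NumPy array with one layer per file:

    import numpy as np

    def sigma_clipped_mean(stack: np.ndarray, threshold: float) -> np.ndarray:
        """Per-pixel mean of the values lying within `threshold` standard
        deviations of that pixel's mean across the stack (one clipping pass)."""
        mean = stack.mean(axis=0)
        std = stack.std(axis=0)
        keep = np.abs(stack - mean) <= threshold * std  # mask of retained values
        counts = np.maximum(keep.sum(axis=0), 1)        # avoid division by zero
        return np.where(keep, stack, 0.0).sum(axis=0) / counts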