Example #1
 def combine_median(cls, file_names: [str], calibrator: Calibrator,
                    console: Console,
                    session_controller: SessionController) -> ndarray:
     """
     Combine the files in the given list using a simple median
     Check, as reading, that they all have the same dimensions
     :param file_names:          Names of files to be combined
     :param calibrator:          Calibration object, abstracting precalibration operations
     :param console:             Redirectable console output handler
     :param session_controller:  Controller for this subtask, checking for cancellation
     :return:                    ndarray giving the 2-dimensional matrix of resulting pixel values
     """
     assert len(
         file_names
     ) > 0  # Otherwise the combine button would have been disabled
     console.push_level()
     console.message("Combine by simple Median", +1)
     descriptors = RmFitsUtil.make_file_descriptions(file_names)
     file_data = RmFitsUtil.read_all_files_data(file_names)
     cls.check_cancellation(session_controller)
     file_data = calibrator.calibrate_images(file_data, descriptors,
                                             console, session_controller)
     cls.check_cancellation(session_controller)
     median_result = numpy.median(file_data, axis=0)
     console.pop_level()
     return median_result
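
For reference, this is the pixel-wise behaviour of the axis-0 median used above; a minimal standalone sketch with made-up values, not part of the original routine:

import numpy

# Three assumed 1x3 "files": numpy.median with axis=0 takes the median across files, per pixel
stack = numpy.array([[[10, 20, 30]],
                     [[12, 19, 90]],
                     [[11, 21, 31]]])
print(numpy.median(stack, axis=0).tolist())  # [[11.0, 20.0, 31.0]] -- the outlier 90 is rejected
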
Example #2
    def original_non_grouped_processing(self, selected_files: [FileDescriptor],
                                        data_model: DataModel,
                                        output_file: str, console: Console):
        console.push_level()
        console.message("Using single-file processing", +1)
        # We'll use the first file in the list as a sample for things like image size
        assert len(selected_files) > 0
        # Confirm that these are all dark frames, and can be combined (same binning and dimensions)
        if FileCombiner.all_compatible_sizes(selected_files):
            self.check_cancellation()
            if data_model.get_ignore_file_type() or FileCombiner.all_of_type(
                    selected_files, FileDescriptor.FILE_TYPE_DARK):
                # Get (most common) filter name in the set
                # Since these are darks, the filter is meaningless, but we need the value
                # for the shared "create file" routine
                filter_name = SharedUtils.most_common_filter_name(
                    selected_files)

                # Do the combination
                self.combine_files(selected_files, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                substituted_folder_name = SharedUtils.substitute_date_time_filter_in_string(
                    data_model.get_disposition_subfolder_name())
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    substituted_folder_name, selected_files, console)
            else:
                raise MasterMakerExceptions.NotAllDarkFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes
        console.message("Combining complete", 0)
        console.pop_level()
Example #3
    def process_one_group(self, data_model: DataModel,
                          descriptor_list: [FileDescriptor],
                          output_directory: str, combine_method: int,
                          disposition_folder_name, console: Console):
        """
        Process one group of files, output to the given directory
        Exceptions thrown:
            NotAllFlatFrames        The given files are not all flat frames
            IncompatibleSizes       The given files are not all the same dimensions

        :param data_model:                  Data model giving options for current run
        :param descriptor_list:             List of all the files in one group, for processing
        :param output_directory:            Path to directory to receive the output file
        :param combine_method:              Code saying how these files should be combined
        :param disposition_folder_name:     If files to be moved after processing, name of receiving folder
        :param console:                     Re-directable console output object
        """
        assert len(descriptor_list) > 0
        sample_file: FileDescriptor = descriptor_list[0]
        console.push_level()
        self.describe_group(data_model, len(descriptor_list), sample_file,
                            console)

        # Make up a file name for this group's output, into the given directory
        file_name = SharedUtils.get_file_name_portion(
            combine_method, sample_file, data_model.get_sigma_clip_threshold(),
            data_model.get_min_max_number_clipped_per_end())
        output_file = f"{output_directory}/{file_name}"

        # Confirm that these are all flat frames, and can be combined (same binning and dimensions)
        if self.all_compatible_sizes(descriptor_list):
            if data_model.get_ignore_file_type() \
                    or FileCombiner.all_of_type(descriptor_list, FileDescriptor.FILE_TYPE_FLAT):
                # Get the filter name to go in the output FITS metadata.
                # All the files should use the same filter, but in case there are stragglers,
                # use the most common filter in the set
                filter_name = SharedUtils.most_common_filter_name(
                    descriptor_list)

                # Do the combination
                self.combine_files(descriptor_list, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    disposition_folder_name, descriptor_list, console)
                self.check_cancellation()
            else:
                raise MasterMakerExceptions.NotAllFlatFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes

        console.pop_level()
Example #4
    def calibrate_with_auto_directory(
            self, file_data: [ndarray], auto_directory_path: str,
            descriptors: [FileDescriptor], console: Console,
            session_controller: SessionController) -> [ndarray]:
        """
        Calibrate the given files' contents, each with the best-matching calibration file
        from a directory.  "Best" is measured by trying to match both the exposure time
        and temperature, with more weight to the exposure time.  A separate file is chosen
        for each input image, since the exposure times of collected flats often vary
        during the collection session, to keep the ADU level constant as the light changes.
        :param file_data:               List of images' data (list of 2-d matrices of pixel values)
        :param auto_directory_path:     Path to folder of calibration images
        :param descriptors:             Descs of files corresponding to the given images
        :param console:                 Redirectable console output object
        :param session_controller:      Controller for this subtask
        :return:                        List of calibrated images
        """
        assert len(file_data) > 0
        assert len(file_data) == len(descriptors)

        # Get all calibration files from directory so we only have to read it once
        directory_files = self.all_descriptors_from_directory(
            auto_directory_path,
            self._data_model.get_auto_directory_recursive())
        if session_controller.thread_cancelled():
            raise MasterMakerExceptions.SessionCancelled
        if len(directory_files) == 0:
            # No files in that directory, raise exception
            raise MasterMakerExceptions.AutoCalibrationDirectoryEmpty(
                auto_directory_path)

        console.push_level()
        console.message(
            f"Calibrating from directory containing {len(directory_files)} files.",
            +1)
        result = file_data.copy()
        for input_index in range(len(descriptors)):
            if session_controller.thread_cancelled():
                raise MasterMakerExceptions.SessionCancelled
            this_file: FileDescriptor = descriptors[input_index]
            calibration_file = self.get_best_calibration_file(
                directory_files, this_file, session_controller, console)
            if session_controller.thread_cancelled():
                raise MasterMakerExceptions.SessionCancelled
            calibration_image = RmFitsUtil.fits_data_from_path(
                calibration_file)
            (calibration_x, calibration_y) = calibration_image.shape
            (layer_x, layer_y) = result[input_index].shape
            if (layer_x != calibration_x) or (layer_y != calibration_y):
                raise MasterMakerExceptions.IncompatibleSizes
            difference = result[input_index] - calibration_image
            result[input_index] = difference.clip(0, 0xFFFF)
        console.pop_level()
        return result
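
The subtraction-and-clip step at the end of calibrate_with_auto_directory keeps calibrated pixels in the unsigned 16-bit range; a minimal sketch with assumed toy frames (the real code operates on full-size FITS data):

import numpy

# Assumed 2x2 image and matching calibration frame
image = numpy.array([[1200, 900],
                     [450, 65535]], dtype=numpy.int64)
calibration = numpy.array([[1000, 950],
                           [500, 100]], dtype=numpy.int64)

difference = image - calibration         # can go negative where the calibration frame is brighter
calibrated = difference.clip(0, 0xFFFF)  # negative pixels floor at 0, results stay within 16 bits
print(calibrated.tolist())               # [[200, 0], [0, 65435]]
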
Example #5
 def combine_median(cls, file_names: [str], calibrator: Calibrator,
                    console: Console,
                    session_controller: SessionController) -> ndarray:
     assert len(
         file_names
     ) > 0  # Otherwise the combine button would have been disabled
     console.push_level()
     console.message("Combine by simple Median", +1)
     file_data = RmFitsUtil.read_all_files_data(file_names)
     cls.check_cancellation(session_controller)
     sample_file = RmFitsUtil.make_file_descriptor(file_names[0])
     file_data = calibrator.calibrate_images(file_data, sample_file,
                                             console, session_controller)
     cls.check_cancellation(session_controller)
     median_result = numpy.median(file_data, axis=0)
     console.pop_level()
     return median_result
Example #6
    def original_non_grouped_processing(self, selected_files: [FileDescriptor],
                                        data_model: DataModel,
                                        output_file: str, console: Console):
        """
        Process one set of files to a single output file.
        Output to the given path, if provided.  If not provided, prompt the user for it.
        :param selected_files:      List of descriptions of files to be combined
        :param data_model:          Data model that gives combination method and other options
        :param output_file:         Path for the combined output file
        :param console:             Re-directable console output object
        """
        console.push_level()
        console.message("Using single-file processing", +1)
        # We'll use the first file in the list as a sample for things like image size
        assert len(selected_files) > 0
        # Confirm that these are all flat frames, and can be combined (same binning and dimensions)
        if FileCombiner.all_compatible_sizes(selected_files):
            self.check_cancellation()
            if data_model.get_ignore_file_type() or FileCombiner.all_of_type(
                    selected_files, FileDescriptor.FILE_TYPE_FLAT):
                # Get (most common) filter name in the set
                # What filter should we put in the metadata for the output file?
                filter_name = SharedUtils.most_common_filter_name(
                    selected_files)

                # Do the combination
                self.combine_files(selected_files, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                substituted_folder_name = SharedUtils.substitute_date_time_filter_in_string(
                    data_model.get_disposition_subfolder_name())
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    substituted_folder_name, selected_files, console)
            else:
                raise MasterMakerExceptions.NotAllFlatFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes
        console.message("Combining complete", 0)
        console.pop_level()
Example #7
    def process_one_group(self, data_model: DataModel,
                          descriptor_list: [FileDescriptor],
                          output_directory: str, combine_method: int,
                          disposition_folder_name, console: Console):
        assert len(descriptor_list) > 0
        sample_file: FileDescriptor = descriptor_list[0]
        console.push_level()
        self.describe_group(data_model, len(descriptor_list), sample_file,
                            console)

        # Make up a file name for this group's output, into the given directory
        file_name = SharedUtils.get_file_name_portion(
            combine_method, sample_file, data_model.get_sigma_clip_threshold(),
            data_model.get_min_max_number_clipped_per_end())
        output_file = f"{output_directory}/{file_name}"

        # Confirm that these are all dark frames, and can be combined (same binning and dimensions)
        if self.all_compatible_sizes(descriptor_list):
            if data_model.get_ignore_file_type() \
                    or FileCombiner.all_of_type(descriptor_list, FileDescriptor.FILE_TYPE_DARK):
                # Get (most common) filter name in the set
                # Since these are darks, the filter is meaningless, but we need the value
                # for the shared "create file" routine
                filter_name = SharedUtils.most_common_filter_name(
                    descriptor_list)

                # Do the combination
                self.combine_files(descriptor_list, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    disposition_folder_name, descriptor_list, console)
                self.check_cancellation()
            else:
                raise MasterMakerExceptions.NotAllDarkFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes
        console.pop_level()
Example #8
    def combine_mean(cls, file_names: [str], calibrator: Calibrator,
                     console: Console,
                     session_controller: SessionController) -> ndarray:
        """Combine FITS files in given list using simple mean.  Return an ndarray containing the combined data."""
        assert len(
            file_names
        ) > 0  # Otherwise the combine button would have been disabled
        console.push_level()
        console.message("Combining by simple mean", +1)
        sample_file = RmFitsUtil.make_file_descriptor(file_names[0])
        file_data: [ndarray]
        file_data = RmFitsUtil.read_all_files_data(file_names)

        cls.check_cancellation(session_controller)
        calibrated_data = calibrator.calibrate_images(file_data, sample_file,
                                                      console,
                                                      session_controller)

        cls.check_cancellation(session_controller)
        mean_result = numpy.mean(calibrated_data, axis=0)
        console.pop_level()
        return mean_result
Example #9
    def process_groups(self, data_model: DataModel,
                       selected_files: [FileDescriptor], output_directory: str,
                       console: Console):
        """
        Process the given selected files in groups by size, exposure, or temperature (or any combination)
        Exceptions thrown:
            NoGroupOutputDirectory      Output directory does not exist and unable to create it
        :param data_model:          Data model specifying options for the current run
        :param selected_files:      List of descriptions of files to be grouped then processed
        :param output_directory:    Directory to contain output files from processed groups
        :param console:             Re-directable console output object
        """
        console.push_level()
        temperature_bandwidth = data_model.get_temperature_group_bandwidth()
        disposition_folder = data_model.get_disposition_subfolder_name()
        substituted_folder_name = SharedUtils.substitute_date_time_filter_in_string(
            disposition_folder)
        console.message(
            "Process groups into output directory: " + output_directory, +1)
        if not SharedUtils.ensure_directory_exists(output_directory):
            raise MasterMakerExceptions.NoGroupOutputDirectory(
                output_directory)
        minimum_group_size = data_model.get_minimum_group_size() \
            if data_model.get_ignore_groups_fewer_than() else 0

        #  Process size groups, or all sizes if not grouping
        groups_by_size = self.get_groups_by_size(
            selected_files, data_model.get_group_by_size())
        group_by_size = data_model.get_group_by_size()
        group_by_temperature = data_model.get_group_by_temperature()
        group_by_filter = data_model.get_group_by_filter()
        for size_group in groups_by_size:
            self.check_cancellation()
            console.push_level()
            # Message about this group only if this grouping was requested
            if len(size_group) < minimum_group_size:
                if group_by_size:
                    console.message(
                        f"Ignoring one size group: {len(size_group)} "
                        f"files {size_group[0].get_size_key()}", +1)
            else:
                if group_by_size:
                    console.message(
                        f"Processing one size group: {len(size_group)} "
                        f"files {size_group[0].get_size_key()}", +1)
                # Within this size group, process temperature groups, or all temperatures if not grouping
                groups_by_temperature = \
                    self.get_groups_by_temperature(size_group,
                                                   data_model.get_group_by_temperature(),
                                                   temperature_bandwidth)
                for temperature_group in groups_by_temperature:
                    self.check_cancellation()
                    console.push_level()
                    (_, mean_temperature
                     ) = ImageMath.mean_exposure_and_temperature(
                         temperature_group)
                    if len(temperature_group) < minimum_group_size:
                        if group_by_temperature:
                            console.message(
                                f"Ignoring one temperature group: {len(temperature_group)} "
                                f"files with mean temperature {mean_temperature:.1f}",
                                +1)
                    else:
                        if group_by_temperature:
                            console.message(
                                f"Processing one temperature group: {len(temperature_group)} "
                                f"files with mean temperature {mean_temperature:.1f}",
                                +1)
                        # Within this temperature group, process filter groups, or all filters if not grouping
                        groups_by_filter = \
                            self.get_groups_by_filter(temperature_group,
                                                      data_model.get_group_by_filter())
                        for filter_group in groups_by_filter:
                            self.check_cancellation()
                            console.push_level()
                            filter_name = filter_group[0].get_filter_name()
                            if len(filter_group) < minimum_group_size:
                                if group_by_filter:
                                    console.message(
                                        f"Ignoring one filter group: {len(filter_group)} "
                                        f"files with {filter_name} filter ",
                                        +1)
                            else:
                                if group_by_filter:
                                    console.message(
                                        f"Processing one filter group: {len(filter_group)} "
                                        f"files with {filter_name} filter ",
                                        +1)
                                self.process_one_group(
                                    data_model, filter_group, output_directory,
                                    data_model.get_master_combine_method(),
                                    substituted_folder_name, console)
                            console.pop_level()
                        self.check_cancellation()
                    console.pop_level()
            console.pop_level()
        console.message("Group combining complete", 0)
        console.pop_level()
Example #10
    def combine_files(self, input_files: [FileDescriptor],
                      data_model: DataModel, filter_name: str,
                      output_path: str, console: Console):
        """
        Combine the given files, output to the given output file using the combination
        method defined in the data model.

        :param input_files:     List of files to be combined
        :param data_model:      Data model with options for this run
        :param filter_name:     Human-readable filter name (for output file name and FITS comment)
        :param output_path:     Path for output file to be created
        :param console:         Redirectable console output object
        """
        console.push_level()  # Stack console indentation level to easily restore when done
        substituted_file_name = SharedUtils.substitute_date_time_filter_in_string(
            output_path)
        file_names = [d.get_absolute_path() for d in input_files]
        combine_method = data_model.get_master_combine_method()
        # Get info about any precalibration that is to be done
        calibrator = Calibrator(data_model)
        calibration_tag = calibrator.fits_comment_tag()
        assert len(input_files) > 0
        binning: int = input_files[0].get_binning()
        (mean_exposure, mean_temperature
         ) = ImageMath.mean_exposure_and_temperature(input_files)
        if combine_method == Constants.COMBINE_MEAN:
            mean_data = ImageMath.combine_mean(file_names, calibrator, console,
                                               self._session_controller)
            self.check_cancellation()
            RmFitsUtil.create_combined_fits_file(
                substituted_file_name, mean_data,
                FileDescriptor.FILE_TYPE_FLAT, "Flat Frame", mean_exposure,
                mean_temperature, filter_name, binning,
                f"Master Flat MEAN combined {calibration_tag}")
        elif combine_method == Constants.COMBINE_MEDIAN:
            median_data = ImageMath.combine_median(file_names, calibrator,
                                                   console,
                                                   self._session_controller)
            self.check_cancellation()
            RmFitsUtil.create_combined_fits_file(
                substituted_file_name, median_data,
                FileDescriptor.FILE_TYPE_FLAT, "Flat Frame", mean_exposure,
                mean_temperature, filter_name, binning,
                f"Master Flat MEDIAN combined {calibration_tag}")
        elif combine_method == Constants.COMBINE_MINMAX:
            number_dropped_points = data_model.get_min_max_number_clipped_per_end()
            min_max_clipped_mean = ImageMath.combine_min_max_clip(
                file_names, number_dropped_points, calibrator, console,
                self._session_controller)
            self.check_cancellation()
            assert min_max_clipped_mean is not None
            RmFitsUtil.create_combined_fits_file(
                substituted_file_name, min_max_clipped_mean,
                FileDescriptor.FILE_TYPE_FLAT, "Flat Frame", mean_exposure,
                mean_temperature, filter_name, binning,
                f"Master Flat Min/Max Clipped "
                f"(drop {number_dropped_points}) Mean combined"
                f" {calibration_tag}")
        else:
            assert combine_method == Constants.COMBINE_SIGMA_CLIP
            sigma_threshold = data_model.get_sigma_clip_threshold()
            sigma_clipped_mean = ImageMath.combine_sigma_clip(
                file_names, sigma_threshold, calibrator, console,
                self._session_controller)
            self.check_cancellation()
            assert sigma_clipped_mean is not None
            RmFitsUtil.create_combined_fits_file(
                substituted_file_name, sigma_clipped_mean,
                FileDescriptor.FILE_TYPE_FLAT, "Flat Frame", mean_exposure,
                mean_temperature, filter_name, binning,
                f"Master Flat Sigma Clipped "
                f"(threshold {sigma_threshold}) Mean combined"
                f" {calibration_tag}")
        console.pop_level()
Example #11
    def combine_sigma_clip(
            cls, file_names: [str], sigma_threshold: float,
            calibrator: Calibrator, console: Console,
            session_controller: SessionController) -> Optional[ndarray]:
        console.push_level()
        console.message(
            f"Combine by sigma-clipped mean, z-score threshold {sigma_threshold}",
            +1)
        sample_file = RmFitsUtil.make_file_descriptor(file_names[0])

        file_data = numpy.asarray(RmFitsUtil.read_all_files_data(file_names))
        cls.check_cancellation(session_controller)

        file_data = calibrator.calibrate_images(file_data, sample_file,
                                                console, session_controller)
        cls.check_cancellation(session_controller)

        console.message("Calculating unclipped means", +1)
        column_means = numpy.mean(file_data, axis=0)
        cls.check_cancellation(session_controller)

        console.message("Calculating standard deviations", 0)
        column_stdevs = numpy.std(file_data, axis=0)
        cls.check_cancellation(session_controller)
        console.message("Calculating z-scores", 0)
        # Now what we'd like to do is just:
        #    z_scores = abs(file_data - column_means) / column_stdevs
        # Unfortunately, standard deviations can be zero, so that simplistic
        # statement would generate division-by-zero errors.
        # Std for a column would be zero if all the values in the column were identical.
        # In that case we wouldn't want to eliminate any anyway, so we'll set the
        # zero stdevs to a large number, which causes the z-scores to be small, which
        # causes no values to be eliminated.
        column_stdevs[column_stdevs == 0.0] = sys.float_info.max
        z_scores = abs(file_data - column_means) / column_stdevs
        cls.check_cancellation(session_controller)

        console.message("Eliminated data outside threshold", 0)
        exceeds_threshold = z_scores > sigma_threshold
        cls.check_cancellation(session_controller)

        # Calculate and display how much data we are ignoring
        dimensions = exceeds_threshold.shape
        total_pixels = dimensions[0] * dimensions[1] * dimensions[2]
        number_masked = numpy.count_nonzero(exceeds_threshold)
        percentage_masked = 100.0 * number_masked / total_pixels
        console.message(
            f"Discarded {number_masked:,} pixels of {total_pixels:,} "
            f"({percentage_masked:.3f}% of data)", +1)

        masked_array = ma.masked_array(file_data, exceeds_threshold)
        cls.check_cancellation(session_controller)
        console.message("Calculating adjusted means", -1)
        masked_means = ma.mean(masked_array, axis=0)
        cls.check_cancellation(session_controller)

        # If the means matrix contains any masked values, that means that in that column the clipping
        # eliminated *all* the data.  We will find the offending columns and re-calculate those using
        # simple min-max clipping.
        if ma.is_masked(masked_means):
            console.message(
                "Some columns lost all their values; min-max clipping those columns.",
                0)
            #  Get the mask, and get a 2D matrix showing which columns were entirely masked
            eliminated_columns_map = ndarray.all(exceeds_threshold, axis=0)
            masked_coordinates = numpy.where(eliminated_columns_map)
            x_coordinates = masked_coordinates[0]
            y_coordinates = masked_coordinates[1]
            assert len(x_coordinates) == len(y_coordinates)
            for index in range(len(x_coordinates)):
                cls.check_cancellation(session_controller)
                column_x = x_coordinates[index]
                column_y = y_coordinates[index]
                column = file_data[:, column_x, column_y]
                min_max_clipped_mean: int = round(
                    cls.calc_mm_clipped_mean(column, 2, console,
                                             session_controller))
                masked_means[column_x, column_y] = min_max_clipped_mean
            # We've replaced the problematic columns, now the mean should calculate cleanly
            assert not ma.is_masked(masked_means)
        cls.check_cancellation(session_controller)
        console.pop_level()
        result = masked_means.round().filled()
        return result
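
To make the zero-stdev guard and z-score masking in combine_sigma_clip concrete, here is a minimal standalone sketch on a made-up 3-layer stack with a hypothetical threshold of 1.0 (the production code reads real FITS data and uses the user's threshold setting):

import sys
import numpy
import numpy.ma as ma

# Assumed stack: 3 layers of a 1x2 image; the first pixel has an outlier, the second is constant
file_data = numpy.array([[[10.0, 100.0]],
                         [[12.0, 100.0]],
                         [[90.0, 100.0]]])

column_means = numpy.mean(file_data, axis=0)
column_stdevs = numpy.std(file_data, axis=0)
column_stdevs[column_stdevs == 0.0] = sys.float_info.max  # constant pixel: avoid division by zero
z_scores = abs(file_data - column_means) / column_stdevs
exceeds_threshold = z_scores > 1.0                        # hypothetical threshold for the sketch
clipped_means = ma.mean(ma.masked_array(file_data, exceeds_threshold), axis=0)
print(clipped_means.tolist())  # [[11.0, 100.0]] -- the outlier 90 is rejected, the constant pixel is untouched
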
Example #12
    def calc_mm_clipped_mean(cls, column: numpy.array,
                             number_dropped_values: int, console: Console,
                             session_controller: SessionController) -> int:
        console.push_level()
        clipped_list = sorted(column.tolist())
        # Example List is now [0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 8, 8, 9, 9]

        # Drop all the instances of the minimum from the list
        drop = number_dropped_values
        while (drop > 0) and (len(clipped_list) > 0):
            cls.check_cancellation(session_controller)
            drop -= 1
            minimum_value = clipped_list[0]  # 0 in example
            # Find the index just past the last occurrence of this value in the sorted list
            index_past = numpy.searchsorted(clipped_list,
                                            minimum_value,
                                            side="right")  # example: 2
            if index_past == len(clipped_list):
                clipped_list = []  # We've deleted the whole list
            else:
                clipped_list = clipped_list[index_past:]

        # Drop all the instances of the maximum from the list
        # Eg. now [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 8, 8, 9, 9]
        drop = number_dropped_values
        while (drop > 0) and (len(clipped_list) > 0):
            cls.check_cancellation(session_controller)
            drop -= 1
            maximum_value = clipped_list[-1]  # 9 in example
            # Find the first occurrence of this value in the sorted list
            first_index = numpy.searchsorted(clipped_list,
                                             maximum_value,
                                             side="left")  # example: 16
            if first_index == 0:
                clipped_list = []
            else:
                # Remember, Python slice ranges exclude the end index, so no "minus one" is needed
                clipped_list = clipped_list[0:first_index]
                # Now [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 8, 8]

        cls.check_cancellation(session_controller)
        if len(clipped_list) == 0:
            # Oops.  We've deleted the whole list, now how are we going to find a mean?
            # First, try reducing the number of dropped values
            # print("Min/Max clipping emptied list")
            if number_dropped_values > 1:
                # print("   Try reducing number of dropped values")
                result_mean = cls.calc_mm_clipped_mean(
                    column, number_dropped_values - 1, console,
                    session_controller)
                cls.check_cancellation(session_controller)
            else:
                # Even dropping only 1 value we emptied the list.  Just mean the whole thing.
                # print("   Dropped values at minimum.  Mean column without clipping.")
                result_mean = numpy.mean(column)
        else:
            # We have data left after the min/max clipping.  Calculate its mean
            # print(f"List of {len(column)} values reduced to {len(clipped_list)} values, calc mean")
            result_mean = numpy.mean(clipped_list)
        console.pop_level()
        return result_mean
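
Restating the arithmetic calc_mm_clipped_mean performs on the example column from its comments, as a standalone sketch (plain list filtering here instead of the searchsorted slicing above):

import numpy

column = [0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 8, 8, 9, 9]
number_dropped_values = 2

clipped = sorted(column)
for _ in range(number_dropped_values):  # drop every instance of the current minimum, twice
    clipped = [v for v in clipped if v != clipped[0]] if clipped else []
for _ in range(number_dropped_values):  # drop every instance of the current maximum, twice
    clipped = [v for v in clipped if v != clipped[-1]] if clipped else []

print(clipped)               # [2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5]
print(numpy.mean(clipped))   # about 3.167; callers round this to 3
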
Example #13
    def min_max_clip_version_5(cls, file_data: ndarray,
                               number_dropped_values: int, console: Console,
                               session_controller: SessionController):
        console.push_level()
        console.message(
            f"Using min-max clip with {number_dropped_values} iterations", +1)
        masked_array = ma.MaskedArray(file_data)
        drop_counter = 1
        while drop_counter <= number_dropped_values:
            cls.check_cancellation(session_controller)
            console.push_level()
            console.message(
                f"Iteration {drop_counter} of {number_dropped_values}.", +1)
            drop_counter += 1
            # Find the minimums in all columns.  This will give a 2d matrix the same size as the images
            # with the column-minimum in each position
            minimum_values = masked_array.min(axis=0)
            cls.check_cancellation(session_controller)

            # Now compare that matrix of minimums down the layers, so we get Trues where
            # each minimum exists in its column (minimums might exist more than once, and
            # we want to find all of them)
            masked_array = ma.masked_where(masked_array == minimum_values,
                                           masked_array)
            cls.check_cancellation(session_controller)
            console.message("Masked minimums.", +1, temp=True)

            # Now find and mask the maximums, same approach
            maximum_values = masked_array.max(axis=0)
            masked_array = ma.masked_where(masked_array == maximum_values,
                                           masked_array)
            cls.check_cancellation(session_controller)
            console.message("Masked maximums.", +1, temp=True)
            console.pop_level()

        console.message(f"Calculating mean of remaining data.", 0)
        masked_means = numpy.mean(masked_array, axis=0)
        cls.check_cancellation(session_controller)
        # If the means matrix contains any masked values, that means that in that column the clipping
        # eliminated *all* the data.  We will find the offending columns and re-calculate those with
        # fewer dropped extremes.  This should exactly reproduce the results of the cell-by-cell methods
        if ma.is_masked(masked_means):
            console.message(
                "Some columns lost all their values; reducing drops for those columns.",
                0)
            #  Get the mask, and get a 2D matrix showing which columns were entirely masked
            the_mask = masked_array.mask
            eliminated_columns_map = ndarray.all(the_mask, axis=0)
            masked_coordinates = numpy.where(eliminated_columns_map)
            cls.check_cancellation(session_controller)
            x_coordinates = masked_coordinates[0]
            y_coordinates = masked_coordinates[1]
            assert len(x_coordinates) == len(y_coordinates)
            repairs = len(x_coordinates)
            cp = "s" if repairs > 1 else ""
            np = "" if repairs > 1 else "s"
            console.message(f"{repairs} column{cp} need{np} repair.", +1)
            for index in range(repairs):
                cls.check_cancellation(session_controller)
                # print(".", end="\n" if (index > 0) and (index % 50 == 0) else "")
                column_x = x_coordinates[index]
                column_y = y_coordinates[index]
                column = file_data[:, column_x, column_y]
                min_max_clipped_mean: int = round(
                    cls.calc_mm_clipped_mean(column, number_dropped_values - 1,
                                             console, session_controller))
                masked_means[column_x, column_y] = min_max_clipped_mean
            # We've replaced the problematic columns, now the mean should calculate cleanly
            assert not ma.is_masked(masked_means)
        console.pop_level()
        return masked_means.round()
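
A minimal sketch of one masking iteration from min_max_clip_version_5 on an assumed 3-layer stack, including the "column lost all its values" case that the repair loop above handles:

import numpy
import numpy.ma as ma

# Assumed stack: 3 layers of a 1x2 image
stack = numpy.array([[[1.0, 7.0]],
                     [[5.0, 7.0]],
                     [[9.0, 8.0]]])
masked = ma.MaskedArray(stack)

minimum_values = masked.min(axis=0)                         # per-pixel minimum across layers
masked = ma.masked_where(masked == minimum_values, masked)  # mask every occurrence of that minimum
maximum_values = masked.max(axis=0)
masked = ma.masked_where(masked == maximum_values, masked)  # same for the maximum

means = numpy.mean(masked, axis=0)
print(means.tolist())  # [[5.0, None]] -- the second pixel (7, 7, 8) lost every value: both 7s were
                       # masked as minimums and the 8 as the maximum, so it is exactly the kind of
                       # column the repair loop recomputes with calc_mm_clipped_mean
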
Example #14
    def process_groups(self, data_model: DataModel,
                       selected_files: [FileDescriptor], output_directory: str,
                       console: Console):
        console.push_level()
        exposure_bandwidth = data_model.get_exposure_group_bandwidth()
        temperature_bandwidth = data_model.get_temperature_group_bandwidth()
        disposition_folder = data_model.get_disposition_subfolder_name()
        substituted_folder_name = SharedUtils.substitute_date_time_filter_in_string(
            disposition_folder)
        console.message(
            "Process groups into output directory: " + output_directory, +1)
        if not SharedUtils.ensure_directory_exists(output_directory):
            raise MasterMakerExceptions.NoGroupOutputDirectory(
                output_directory)
        minimum_group_size = data_model.get_minimum_group_size() \
            if data_model.get_ignore_groups_fewer_than() else 0

        #  Process size groups, or all sizes if not grouping
        groups_by_size = self.get_groups_by_size(
            selected_files, data_model.get_group_by_size())
        group_by_size = data_model.get_group_by_size()
        group_by_exposure = data_model.get_group_by_exposure()
        group_by_temperature = data_model.get_group_by_temperature()
        for size_group in groups_by_size:
            self.check_cancellation()
            console.push_level()
            # Message about this group only if this grouping was requested
            if len(size_group) < minimum_group_size:
                if group_by_size:
                    console.message(
                        f"Ignoring one size group: {len(size_group)} "
                        f"files {size_group[0].get_size_key()}", +1)
            else:
                if group_by_size:
                    console.message(
                        f"Processing one size group: {len(size_group)} "
                        f"files {size_group[0].get_size_key()}", +1)
                # Within this size group, process exposure groups, or all exposures if not grouping
                groups_by_exposure = self.get_groups_by_exposure(
                    size_group, data_model.get_group_by_exposure(),
                    exposure_bandwidth)
                for exposure_group in groups_by_exposure:
                    self.check_cancellation()
                    (mean_exposure,
                     _) = ImageMath.mean_exposure_and_temperature(
                         exposure_group)
                    console.push_level()
                    if len(exposure_group) < minimum_group_size:
                        if group_by_exposure:
                            console.message(
                                f"Ignoring one exposure group: {len(exposure_group)} "
                                f"files exposed at mean {mean_exposure:.2f} seconds",
                                +1)
                    else:
                        if group_by_exposure:
                            console.message(
                                f"Processing one exposure group: {len(exposure_group)} "
                                f"files exposed at mean {mean_exposure:.2f} seconds",
                                +1)
                        # Within this exposure group, process temperature groups, or all temperatures if not grouping
                        groups_by_temperature = \
                            self.get_groups_by_temperature(exposure_group,
                                                           data_model.get_group_by_temperature(),
                                                           temperature_bandwidth)
                        for temperature_group in groups_by_temperature:
                            self.check_cancellation()
                            console.push_level()
                            (_, mean_temperature
                             ) = ImageMath.mean_exposure_and_temperature(
                                 temperature_group)
                            if len(temperature_group) < minimum_group_size:
                                if group_by_temperature:
                                    console.message(
                                        f"Ignoring one temperature group: {len(temperature_group)} "
                                        f"files with mean temperature {mean_temperature:.1f}",
                                        +1)
                            else:
                                if group_by_temperature:
                                    console.message(
                                        f"Processing one temperature group: {len(temperature_group)} "
                                        f"files with mean temperature {mean_temperature:.1f}",
                                        +1)
                                # Now we have a list of descriptors, grouped as appropriate, to process
                                self.process_one_group(
                                    data_model, temperature_group,
                                    output_directory,
                                    data_model.get_master_combine_method(),
                                    substituted_folder_name, console)
                                self.check_cancellation()
                            console.pop_level()
                    console.pop_level()
            console.pop_level()
        console.message("Group combining complete", 0)
        console.pop_level()
Example #15
 def combine_files(self, input_files: [FileDescriptor],
                   data_model: DataModel, filter_name: str,
                   output_path: str, console: Console):
     console.push_level()
     substituted_file_name = SharedUtils.substitute_date_time_filter_in_string(
         output_path)
     file_names = [d.get_absolute_path() for d in input_files]
     combine_method = data_model.get_master_combine_method()
     # Get info about any precalibration that is to be done
     calibrator = Calibrator(data_model)
     calibration_tag = calibrator.fits_comment_tag()
     assert len(input_files) > 0
     binning: int = input_files[0].get_binning()
     (mean_exposure, mean_temperature
      ) = ImageMath.mean_exposure_and_temperature(input_files)
     if combine_method == Constants.COMBINE_MEAN:
         mean_data = ImageMath.combine_mean(file_names, calibrator, console,
                                            self._session_controller)
         self.check_cancellation()
         RmFitsUtil.create_combined_fits_file(
             substituted_file_name, mean_data,
             FileDescriptor.FILE_TYPE_DARK, "Dark Frame", mean_exposure,
             mean_temperature, filter_name, binning,
             f"Master Dark MEAN combined {calibration_tag}")
     elif combine_method == Constants.COMBINE_MEDIAN:
         median_data = ImageMath.combine_median(file_names, calibrator,
                                                console,
                                                self._session_controller)
         self.check_cancellation()
         RmFitsUtil.create_combined_fits_file(
             substituted_file_name, median_data,
             FileDescriptor.FILE_TYPE_DARK, "Dark Frame", mean_exposure,
             mean_temperature, filter_name, binning,
             f"Master Dark MEDIAN combined {calibration_tag}")
     elif combine_method == Constants.COMBINE_MINMAX:
         number_dropped_points = data_model.get_min_max_number_clipped_per_end(
         )
         min_max_clipped_mean = ImageMath.combine_min_max_clip(
             file_names, number_dropped_points, calibrator, console,
             self._session_controller)
         self.check_cancellation()
         assert min_max_clipped_mean is not None
         RmFitsUtil.create_combined_fits_file(
             substituted_file_name, min_max_clipped_mean,
             FileDescriptor.FILE_TYPE_DARK, "Dark Frame", mean_exposure,
             mean_temperature, filter_name, binning,
             f"Master Dark Min/Max Clipped "
             f"(drop {number_dropped_points}) Mean combined"
             f" {calibration_tag}")
     else:
         assert combine_method == Constants.COMBINE_SIGMA_CLIP
         sigma_threshold = data_model.get_sigma_clip_threshold()
         sigma_clipped_mean = ImageMath.combine_sigma_clip(
             file_names, sigma_threshold, calibrator, console,
             self._session_controller)
         self.check_cancellation()
         assert sigma_clipped_mean is not None
         RmFitsUtil.create_combined_fits_file(
             substituted_file_name, sigma_clipped_mean,
             FileDescriptor.FILE_TYPE_DARK, "Dark Frame", mean_exposure,
             mean_temperature, filter_name, binning,
             f"Master Dark Sigma Clipped "
             f"(threshold {sigma_threshold}) Mean combined"
             f" {calibration_tag}")
     console.pop_level()