Example #1
    def callalgorithm(self):
        """Call the bpm algorithm. The only requirement is that the output
        reference file is saved as self.args.outputreffilename

        mkrefs.py will supply the input files in self.inputimagestable. Each
        row of this table lists a filename to use as input (the 'fitsfile'
        column) along with the image type label associated with it (the
        'imlabel' column, e.g. 'D' for dark, 'F' for flat). From this, you
        can specify the appropriate file names in the call to your module.
        A minimal illustration of this table is shown after this example.
        """
        # Organize the input files into a group of darks and a group of
        # flats
        flatfiles = []
        darkfiles = []
        for row in self.inputimagestable:
            if row['imlabel'] == 'D':
                darkfiles.append(row['fitsfile'])
            elif row['imlabel'] == 'F':
                flatfiles.append(row['fitsfile'])

        # Since this module is called after calib_prep, all of the requested
        # outputs from the various pipeline steps should be present in the
        # output directory. Create lists of these files. The files listed in
        # self.inputimagestable['fitsfile'] should all be the same in terms
        # of their calibration state. So we should only have to check one in
        # order to know what state all of them are.
        dark_slope_files = []
        dark_uncal_files = []
        dark_jump_files = []
        dark_fitopt_files = []

        directory, filename = os.path.split(darkfiles[0])

        # Get the suffix of the input file so we know the calibration state
        suffix = None
        for ramp_suffix in RATE_FILE_SUFFIXES:
            if ramp_suffix in filename:
                dark_slope_files = copy.deepcopy(darkfiles)
                suffix = ramp_suffix
        if suffix is None:
            # Keep the leading underscore on the suffix so that the
            # replacements below do not produce double underscores
            suffix = '_' + filename.split('_')[-1].replace('.fits', '')
            if suffix == '_uncal':
                dark_uncal_files = copy.deepcopy(darkfiles)
            elif suffix == '_jump':
                dark_jump_files = copy.deepcopy(darkfiles)
            else:
                raise ValueError('Unexpected suffix for input dark files.')

        # Create lists of the needed calibration state files
        if len(dark_slope_files) > 0:
            dark_uncal_files = [
                elem.replace(suffix, '_uncal') for elem in dark_slope_files
            ]
            dark_jump_files = [
                elem.replace(suffix, '_jump') for elem in dark_slope_files
            ]
            dark_fitopt_files = [
                elem.replace(suffix, '_fitopt') for elem in dark_slope_files
            ]
        elif len(dark_uncal_files) > 0:
            dark_slope_files = [
                elem.replace(suffix, '_1_ramp_fit')
                for elem in dark_uncal_files
            ]
            dark_jump_files = [
                elem.replace(suffix, '_jump') for elem in dark_uncal_files
            ]
            dark_fitopt_files = [
                elem.replace(suffix, '_fitopt') for elem in dark_uncal_files
            ]
        elif len(dark_jump_files) > 0:
            dark_uncal_files = [
                elem.replace(suffix, '_uncal') for elem in dark_jump_files
            ]
            dark_slope_files = [
                elem.replace(suffix, '_1_ramp_fit') for elem in dark_jump_files
            ]
            dark_fitopt_files = [
                elem.replace(suffix, '_fitopt') for elem in dark_jump_files
            ]

        # Repeat for flat field files
        flat_slope_files = []
        flat_uncal_files = []

        directory, filename = os.path.split(flatfiles[0])

        # Get the suffix of the input file so we know the calibration state
        suffix = None
        for ramp_suffix in RATE_FILE_SUFFIXES:
            if ramp_suffix in filename:
                flat_slope_files = copy.deepcopy(flatfiles)
                suffix = ramp_suffix
        if suffix is None:
            # Keep the leading underscore on the suffix so that the
            # replacements below do not produce double underscores
            suffix = '_' + filename.split('_')[-1].replace('.fits', '')
            if suffix == '_uncal':
                flat_uncal_files = copy.deepcopy(flatfiles)
            else:
                raise ValueError(
                    'Unexpected suffix for input flat field files.')

        # Create lists of the needed calibration state files
        if len(flat_slope_files) > 0:
            flat_uncal_files = [
                elem.replace(suffix, '_uncal') for elem in flat_slope_files
            ]
        elif len(flat_uncal_files) > 0:
            flat_slope_files = [
                elem.replace(suffix, '_1_ramp_fit')
                for elem in flat_uncal_files
            ]

        # The bad pixel mask module needs to use the file with the individual
        # slopes (_1_ramp_fit.fits), rather than the mean slope (_0_ramp_fit.fits),
        # for exposures with more than one integration per exposure. But for
        # exposures with only one integration, only the _0_ramp_fit file will be
        # produced. So go through the lists of slope files and check to see
        # which versions are present, and adjust the lists accordingly.
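        # A minimal sketch of that check (an assumption, not necessarily the
        # original implementation): fall back to the _0_ramp_fit name when a
        # constructed _1_ramp_fit file does not exist on disk.
        for file_list in (dark_slope_files, flat_slope_files):
            for i, slope_file in enumerate(file_list):
                if '_1_ramp_fit' in slope_file and not os.path.isfile(slope_file):
                    file_list[i] = slope_file.replace('_1_ramp_fit', '_0_ramp_fit')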

        # Call the wrapped module and provide the proper arguments from the
        # self.parameters dictionary.
        bpm.bad_pixels(
            flat_slope_files=flat_slope_files,
            dead_search=self.parameters['dead_search'],
            low_qe_and_open_search=self.parameters['low_qe_and_open_search'],
            dead_search_type=self.parameters['dead_search_type'],
            flat_mean_sigma_threshold=self.parameters['flat_mean_sigma_threshold'],
            flat_mean_normalization_method=self.parameters['flat_mean_normalization_method'],
            smoothing_box_width=self.parameters['smoothing_box_width'],
            smoothing_type=self.parameters['smoothing_type'],
            dead_sigma_threshold=self.parameters['dead_sigma_threshold'],
            max_dead_norm_signal=self.parameters['max_dead_norm_signal'],
            run_dead_flux_check=self.parameters['run_dead_flux_check'],
            dead_flux_check_files=flat_uncal_files,
            flux_check=self.parameters['flux_check'],
            max_low_qe_norm_signal=self.parameters['max_low_qe_norm_signal'],
            max_open_adj_norm_signal=self.parameters['max_open_adj_norm_signal'],
            manual_flag_file=self.parameters['manual_flag_file'],
            flat_do_not_use=self.parameters['flat_do_not_use'],
            dark_slope_files=dark_slope_files,
            dark_uncal_files=dark_uncal_files,
            dark_jump_files=dark_jump_files,
            dark_fitopt_files=dark_fitopt_files,
            dark_stdev_clipping_sigma=self.parameters['dark_stdev_clipping_sigma'],
            dark_max_clipping_iters=self.parameters['dark_max_clipping_iters'],
            dark_noisy_threshold=self.parameters['dark_noisy_threshold'],
            max_saturated_fraction=self.parameters['max_saturated_fraction'],
            max_jump_limit=self.parameters['max_jump_limit'],
            jump_ratio_threshold=self.parameters['jump_ratio_threshold'],
            early_cutoff_fraction=self.parameters['early_cutoff_fraction'],
            pedestal_sigma_threshold=self.parameters['pedestal_sigma_threshold'],
            rc_fraction_threshold=self.parameters['rc_fraction_threshold'],
            low_pedestal_fraction=self.parameters['low_pedestal_fraction'],
            high_cr_fraction=self.parameters['high_cr_fraction'],
            flag_values=self.parameters['flag_values'],
            dark_do_not_use=self.parameters['dark_do_not_use'],
            plot=self.parameters['plot'],
            output_file=self.args.outputreffilename,
            author=self.parameters['author'],
            description=self.parameters['description'],
            pedigree=self.parameters['pedigree'],
            useafter=self.parameters['useafter'],
            history=self.parameters['history'],
            quality_check=self.parameters['quality_check'])
        return 0
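
For reference, here is a minimal illustration of the self.inputimagestable table read by callalgorithm() above, assuming an astropy Table and using placeholder filenames (the real table built by mkrefs.py contains additional columns):

    from astropy.table import Table

    # Hypothetical stand-in for the table supplied by mkrefs.py; only the two
    # columns read by callalgorithm() are shown, with placeholder filenames.
    inputimagestable = Table()
    inputimagestable['fitsfile'] = ['dark_0001_uncal.fits', 'flat_0001_uncal.fits']
    inputimagestable['imlabel'] = ['D', 'F']

    for row in inputimagestable:
        print(row['imlabel'], row['fitsfile'])
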
Example #2
    def process(self, illuminated_raw_files, illuminated_slope_files, dark_raw_files, dark_slope_files):
        """The main method for processing darks.  See module docstrings
        for further details.

        Parameters
        ----------
        illuminated_raw_files : list
            List of filenames (including full paths) of raw (uncal) flat
            field files. These should all be for the same detector and
            aperture.

        illuminated_slope_files : list
            List of filenames (including full paths) of flat field slope
            files. These should all be for the same detector and
            aperture and correspond one-to-one with
            ``illuminated_raw_files``. For cases where a raw file exists
            but no slope file, the slope file entry should be the string
            ``'None'``.

        dark_raw_files : list
            List of filenames (including full paths) of raw (uncal) dark
            files. These should all be for the same detector and
            aperture.

        dark_slope_files : list
            List of filenames (including full paths) of dark current
            slope files. These should all be for the same detector and
            aperture and correspond one-to-one with ``dark_raw_files``.
            For cases where a raw file exists but no slope file, the
            slope file entry should be the string ``'None'``.
        """
        # Illuminated files - run entirety of calwebb_detector1 for uncal
        # files where corresponding rate file is 'None'
        all_files = []
        badpix_types = []
        badpix_types_from_flats = ['DEAD', 'LOW_QE', 'OPEN', 'ADJ_OPEN']
        badpix_types_from_darks = ['HOT', 'RC', 'OTHER_BAD_PIXEL', 'TELEGRAPH']
        illuminated_obstimes = []
        if illuminated_raw_files:
            index = 0
            badpix_types.extend(badpix_types_from_flats)
            for uncal_file, rate_file in zip(illuminated_raw_files, illuminated_slope_files):
                self.get_metadata(uncal_file)
                if rate_file == 'None':
                    jump_output, rate_output, _ = pipeline_tools.calwebb_detector1_save_jump(uncal_file, self.data_dir,
                                                                                             ramp_fit=True, save_fitopt=False)
                    if self.nints > 1:
                        illuminated_slope_files[index] = rate_output.replace('0_ramp_fit', '1_ramp_fit')
                    else:
                        illuminated_slope_files[index] = deepcopy(rate_output)
                    index += 1

                # Get observation time for all files
                illuminated_obstimes.append(instrument_properties.get_obstime(uncal_file))

            all_files = deepcopy(illuminated_slope_files)

            min_illum_time = min(illuminated_obstimes)
            max_illum_time = max(illuminated_obstimes)
            mid_illum_time = instrument_properties.mean_time(illuminated_obstimes)

        # Dark files - Run calwebb_detector1 on all uncal files, saving the
        # Jump step output. If corresponding rate file is 'None', then also
        # run the ramp-fit step and save the output
        dark_jump_files = []
        dark_fitopt_files = []
        dark_obstimes = []
        if dark_raw_files:
            index = 0
            badpix_types.extend(badpix_types_from_darks)
            # In this case we need to run the pipeline on all input files,
            # even if the rate file is present, because we also need the jump
            # and fitops files, which are not saved by default
            for uncal_file, rate_file in zip(dark_raw_files, dark_slope_files):
                jump_output, rate_output, fitopt_output = pipeline_tools.calwebb_detector1_save_jump(uncal_file, self.data_dir,
                                                                                                     ramp_fit=True, save_fitopt=True)
                self.get_metadata(uncal_file)
                dark_jump_files.append(jump_output)
                dark_fitopt_files.append(fitopt_output)
                if self.nints > 1:
                    #dark_slope_files[index] = rate_output.replace('rate', 'rateints')
                    dark_slope_files[index] = rate_output.replace('0_ramp_fit', '1_ramp_fit')
                else:
                    dark_slope_files[index] = deepcopy(rate_output)
                dark_obstimes.append(instrument_properties.get_obstime(uncal_file))
                index += 1

            if len(all_files) == 0:
                all_files = deepcopy(dark_slope_files)
            else:
                all_files = all_files + dark_slope_files

            min_dark_time = min(dark_obstimes)
            max_dark_time = max(dark_obstimes)
            mid_dark_time = instrument_properties.mean_time(dark_obstimes)

        # For the dead flux check, filter out any files that have less than
        # 4 groups
        dead_flux_files = []
        if illuminated_raw_files:
            for illum_file in illuminated_raw_files:
                ngroup = fits.getheader(illum_file)['NGROUPS']
                if ngroup >= 4:
                    dead_flux_files.append(illum_file)
        if len(dead_flux_files) == 0:
            dead_flux_files = None

        # Instrument-specific preferences from jwst_reffiles meetings
        if self.instrument in ['nircam', 'niriss', 'fgs']:
            dead_search_type = 'sigma_rate'
        elif self.instrument in ['miri', 'nirspec']:
            dead_search_type = 'absolute_rate'

        flat_mean_normalization_method = 'smoothed'

        # Call the bad pixel search module from jwst_reffiles. Lots of
        # other possible parameters. Only specify the non-default params
        # in order to make things easier to read.
        query_string = 'darks_{}_flats_{}_to_{}'.format(self.dark_query_start, self.flat_query_start, self.query_end)
        output_file = '{}_{}_{}_bpm.fits'.format(self.instrument, self.aperture, query_string)
        output_file = os.path.join(self.output_dir, output_file)
        bad_pixel_mask.bad_pixels(flat_slope_files=illuminated_slope_files, dead_search_type=dead_search_type,
                                  flat_mean_normalization_method=flat_mean_normalization_method,
                                  run_dead_flux_check=True, dead_flux_check_files=dead_flux_files, flux_check=35000,
                                  dark_slope_files=dark_slope_files, dark_uncal_files=dark_raw_files,
                                  dark_jump_files=dark_jump_files, dark_fitopt_files=dark_fitopt_files, plot=False,
                                  output_file=output_file, author='jwst_reffiles', description='A bad pix mask',
                                  pedigree='GROUND', useafter='2222-04-01 00:00:00',
                                  history='This file was created by JWQL', quality_check=False)

        # Read in the newly-created bad pixel file
        set_permissions(output_file)
        badpix_map = fits.getdata(output_file)

        # Locate and read in the current bad pixel mask
        parameters = self.make_crds_parameter_dict()
        mask_dictionary = crds_tools.get_reffiles(parameters, ['mask'], download=True)
        baseline_file = mask_dictionary['mask']

        if 'NOT FOUND' in baseline_file:
            logging.warning(('\tNo baseline bad pixel file for {} {}. Any bad '
                             'pixels found as part of this search will be considered new'.format(self.instrument, self.aperture)))
            baseline_file = output_file
            yd, xd = badpix_map.shape
            baseline_badpix_mask = np.zeros((yd, xd), dtype=int)
        else:
            logging.info('\tBaseline bad pixel file is {}'.format(baseline_file))
            baseline_badpix_mask = fits.getdata(baseline_file)

        # Exclude hot and dead pixels in the current bad pixel mask
        #new_hot_pix = self.exclude_existing_badpix(new_hot_pix, 'hot')
        new_since_reffile = exclude_crds_mask_pix(badpix_map, baseline_badpix_mask)

        # Create a list of the new instances of each type of bad pixel
        for bad_type in badpix_types:
            bad_location_list = bad_map_to_list(new_since_reffile, bad_type)

            # Add new hot and dead pixels to the database
            logging.info('\tFound {} new {} pixels'.format(len(bad_location_list[0]), bad_type))

            if bad_type in badpix_types_from_flats:
                self.add_bad_pix(bad_location_list, bad_type, illuminated_slope_files, min_illum_time, mid_illum_time, max_illum_time, baseline_file)
            elif bad_type in badpix_types_from_darks:
                self.add_bad_pix(bad_location_list, bad_type, dark_slope_files, min_dark_time, mid_dark_time, max_dark_time, baseline_file)
            else:
                raise ValueError("Unrecognized type of bad pixel: {}. Cannot update database table.".format(bad_type))