Code example #1
    def process(self, file_list):
        """The main method for processing darks.  See module docstrings
        for further details.

        Parameters
        ----------
        file_list : list
            List of filenames (including full paths) to the dark current
            files
        """

        # Basic metadata that will be needed later
        self.get_metadata(file_list[0])

        # Determine which pipeline steps need to be executed
        required_steps = pipeline_tools.get_pipeline_steps(self.instrument)
        logging.info(
            '\tRequired calwebb_detector1 pipeline steps to have the data in the '
            'correct format:')
        for item in required_steps:
            logging.info('\t\t{}: {}'.format(item, required_steps[item]))

        # Modify the list of pipeline steps to skip those not needed for the
        # preparation of dark current data
        required_steps['dark_current'] = False
        required_steps['persistence'] = False

        # The NIRSpec IRS^2 readout pattern (NRSIRS2) is the only one whose
        # nframes value is not a power of 2, so group_scale can be skipped
        # for all other readout patterns
        if self.read_pattern not in pipeline_tools.GROUPSCALE_READOUT_PATTERNS:
            required_steps['group_scale'] = False

        # Run pipeline steps on files, generating slope files
        slope_files = []
        for filename in file_list:

            completed_steps = pipeline_tools.completed_pipeline_steps(filename)
            steps_to_run = pipeline_tools.steps_to_run(required_steps,
                                                       completed_steps)
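            # steps_to_run maps each calwebb_detector1 step name to a boolean
            # indicating whether that step still needs to be run on this file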

            logging.info('\tWorking on file: {}'.format(filename))
            logging.info('\tPipeline steps that remain to be run:')
            for item in steps_to_run:
                logging.info('\t\t{}: {}'.format(item, steps_to_run[item]))

            # Run any remaining required pipeline steps
            if not any(steps_to_run.values()):
                slope_files.append(filename)
            else:
                processed_file = filename.replace('.fits', '_rate.fits')

                # If the slope file already exists, skip the pipeline call
                if not os.path.isfile(processed_file):
                    logging.info('\tRunning pipeline on {}'.format(filename))
                    processed_file = pipeline_tools.run_calwebb_detector1_steps(
                        os.path.abspath(filename), steps_to_run)
                    logging.info('\tPipeline complete. Output: {}'.format(
                        processed_file))

                else:
                    logging.info(
                        '\tSlope file {} already exists. Skipping call to pipeline.'
                        .format(processed_file))

                slope_files.append(processed_file)

                # Delete the original dark ramp file to save disk space
                os.remove(filename)

        obs_times = []
        logging.info(
            '\tSlope images to use in the dark monitor for {}, {}:'.format(
                self.instrument, self.aperture))
        for item in slope_files:
            logging.info('\t\t{}'.format(item))
            # Get the observation time for each file
            obstime = instrument_properties.get_obstime(item)
            obs_times.append(obstime)

        # Find the earliest and latest observation time, and calculate
        # the mid-time.
        min_time = np.min(obs_times)
        max_time = np.max(obs_times)
        mid_time = instrument_properties.mean_time(obs_times)

        # Read in all slope images and place into a list
        slope_image_stack, slope_exptimes = pipeline_tools.image_stack(
            slope_files)

        # Calculate a mean slope image from the inputs
        slope_image, stdev_image = calculations.mean_image(slope_image_stack,
                                                           sigma_threshold=3)
        mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image,
                                                     slope_files)
        logging.info(
            '\tSigma-clipped mean of the slope images saved to: {}'.format(
                mean_slope_file))

        # ----- Search for new hot/dead/noisy pixels -----
        # Read in baseline mean slope image and stdev image
        # The baseline image is used to look for hot/dead/noisy pixels,
        # but not for comparing mean dark rates. Therefore, updates to
        # the baseline can be minimal.

        # Limit checks for hot/dead/noisy pixels to full frame data since
        # subarray data have much shorter exposure times and therefore lower
        # signal-to-noise
        aperture_type = Siaf(self.instrument)[self.aperture].AperType
        if aperture_type == 'FULLSCA':
            baseline_file = self.get_baseline_filename()
            if baseline_file is None:
                logging.warning((
                    '\tNo baseline dark current countrate image for {} {}. Setting the '
                    'current mean slope image to be the new baseline.'.format(
                        self.instrument, self.aperture)))
                baseline_file = mean_slope_file
                baseline_mean = deepcopy(slope_image)
                baseline_stdev = deepcopy(stdev_image)
            else:
                logging.info('\tBaseline file is {}'.format(baseline_file))
                baseline_mean, baseline_stdev = self.read_baseline_slope_image(
                    baseline_file)

            # Check the hot/dead pixel population for changes
            new_hot_pix, new_dead_pix = self.find_hot_dead_pixels(
                slope_image, baseline_mean)

            # Shift the coordinates to be in full frame coordinate system
            new_hot_pix = self.shift_to_full_frame(new_hot_pix)
            new_dead_pix = self.shift_to_full_frame(new_dead_pix)

            # Exclude hot and dead pixels found previously
            new_hot_pix = self.exclude_existing_badpix(new_hot_pix, 'hot')
            new_dead_pix = self.exclude_existing_badpix(new_dead_pix, 'dead')

            # Add new hot and dead pixels to the database
            logging.info('\tFound {} new hot pixels'.format(len(
                new_hot_pix[0])))
            logging.info('\tFound {} new dead pixels'.format(
                len(new_dead_pix[0])))
            self.add_bad_pix(new_hot_pix, 'hot', file_list, mean_slope_file,
                             baseline_file, min_time, mid_time, max_time)
            self.add_bad_pix(new_dead_pix, 'dead', file_list, mean_slope_file,
                             baseline_file, min_time, mid_time, max_time)

            # Check for any pixels that are significantly more noisy than
            # in the baseline stdev image
            new_noisy_pixels = self.noise_check(stdev_image, baseline_stdev)

            # Shift coordinates to be in full_frame coordinate system
            new_noisy_pixels = self.shift_to_full_frame(new_noisy_pixels)

            # Exclude previously found noisy pixels
            new_noisy_pixels = self.exclude_existing_badpix(
                new_noisy_pixels, 'noisy')

            # Add new noisy pixels to the database
            logging.info('\tFound {} new noisy pixels'.format(
                len(new_noisy_pixels[0])))
            self.add_bad_pix(new_noisy_pixels, 'noisy', file_list,
                             mean_slope_file, baseline_file, min_time,
                             mid_time, max_time)

        # ----- Calculate image statistics -----

        # Find amplifier boundaries so per-amp statistics can be calculated
        number_of_amps, amp_bounds = instrument_properties.amplifier_info(
            slope_files[0])
        logging.info('\tAmplifier boundaries: {}'.format(amp_bounds))

        # Calculate mean and stdev values, and fit a Gaussian to the
        # histogram of the pixels in each amp
        (amp_mean, amp_stdev, gauss_param, gauss_chisquared,
         double_gauss_params, double_gauss_chisquared, histogram,
         bins) = self.stats_by_amp(slope_image, amp_bounds)
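        # Each quantity returned by stats_by_amp is a dictionary keyed by
        # amplifier, so one database row per amplifier is constructed below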

        # Construct new entry for dark database table
        source_files = [os.path.basename(item) for item in file_list]
        for key in amp_mean.keys():
            dark_db_entry = {
                'aperture': self.aperture,
                'amplifier': key,
                'mean': amp_mean[key],
                'stdev': amp_stdev[key],
                'source_files': source_files,
                'obs_start_time': min_time,
                'obs_mid_time': mid_time,
                'obs_end_time': max_time,
                'gauss_amplitude': list(gauss_param[key][0]),
                'gauss_peak': list(gauss_param[key][1]),
                'gauss_width': list(gauss_param[key][2]),
                'gauss_chisq': gauss_chisquared[key],
                'double_gauss_amplitude1': double_gauss_params[key][0],
                'double_gauss_peak1': double_gauss_params[key][1],
                'double_gauss_width1': double_gauss_params[key][2],
                'double_gauss_amplitude2': double_gauss_params[key][3],
                'double_gauss_peak2': double_gauss_params[key][4],
                'double_gauss_width2': double_gauss_params[key][5],
                'double_gauss_chisq': double_gauss_chisquared[key],
                'mean_dark_image_file': os.path.basename(mean_slope_file),
                'hist_dark_values': bins,
                'hist_amplitudes': histogram,
                'entry_date': datetime.datetime.now()
            }
            self.stats_table.__table__.insert().execute(dark_db_entry)
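
A minimal driver for code example #1 might look like the following sketch. The ``DarkMonitor`` class name, the attribute setup, and the file paths are hypothetical placeholders; in practice the surrounding monitor code is expected to assemble the file list and set the instrument and aperture attributes before calling ``process``.

    # Hypothetical driver, assuming the method above lives on a class named
    # DarkMonitor (placeholder name) that can be imported and instantiated.
    # The attributes set here are the ones the method reads directly
    # (self.instrument, self.aperture); process() fills the rest of the
    # metadata via get_metadata().
    from glob import glob

    monitor = DarkMonitor()                                  # placeholder class
    monitor.instrument = 'nircam'                            # lowercase, as elsewhere in this code
    monitor.aperture = 'NRCA1_FULL'                          # SIAF aperture name
    dark_files = sorted(glob('/path/to/darks/*_dark.fits'))  # full paths, per the docstring
    monitor.process(dark_files)
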
Code example #2
    def process(self, illuminated_raw_files, illuminated_slope_files, dark_raw_files, dark_slope_files):
        """The main method for processing darks.  See module docstrings
        for further details.

        Parameters
        ----------
        illuminated_raw_files : list
            List of filenames (including full paths) of raw (uncal) flat
            field files. These should all be for the same detector and
            aperture.

        illuminated_slope_files : list
            List of filenames (including full paths) of flat field slope
            files. These should all be for the same detector and
            aperture and correspond one-to-one with
            ``illuminated_raw_files``. For cases where a raw file exists
            but no slope file, the list entry should be the string
            ``'None'``

        dark_raw_files : list
            List of filenames (including full paths) of raw (uncal) dark
            files. These should all be for the same detector and
            aperture.

        dark_slope_files : list
            List of filenames (including full paths) of dark current
            slope files. These should all be for the same detector and
            aperture and correspond one-to-one with ``dark_raw_files``.
            For cases where a raw file exists but no slope file, the
            list entry should be the string ``'None'``
        """
        # Illuminated files - run entirety of calwebb_detector1 for uncal
        # files where corresponding rate file is 'None'
        all_files = []
        badpix_types = []
        badpix_types_from_flats = ['DEAD', 'LOW_QE', 'OPEN', 'ADJ_OPEN']
        badpix_types_from_darks = ['HOT', 'RC', 'OTHER_BAD_PIXEL', 'TELEGRAPH']
        illuminated_obstimes = []
        if illuminated_raw_files:
            index = 0
            badpix_types.extend(badpix_types_from_flats)
            for uncal_file, rate_file in zip(illuminated_raw_files, illuminated_slope_files):
                self.get_metadata(uncal_file)
                if rate_file == 'None':
                    jump_output, rate_output, _ = pipeline_tools.calwebb_detector1_save_jump(uncal_file, self.data_dir,
                                                                                             ramp_fit=True, save_fitopt=False)
                    if self.nints > 1:
                        illuminated_slope_files[index] = rate_output.replace('0_ramp_fit', '1_ramp_fit')
                    else:
                        illuminated_slope_files[index] = deepcopy(rate_output)
                # Advance the index for every file (not only reprocessed ones)
                # so slope-file replacements stay aligned with the raw list
                index += 1

                # Get observation time for all files
                illuminated_obstimes.append(instrument_properties.get_obstime(uncal_file))

            all_files = deepcopy(illuminated_slope_files)

            min_illum_time = min(illuminated_obstimes)
            max_illum_time = max(illuminated_obstimes)
            mid_illum_time = instrument_properties.mean_time(illuminated_obstimes)

        # Dark files - Run calwebb_detector1 on all uncal files, saving the
        # Jump step output. If corresponding rate file is 'None', then also
        # run the ramp-fit step and save the output
        dark_jump_files = []
        dark_fitopt_files = []
        dark_obstimes = []
        if dark_raw_files:
            index = 0
            badpix_types.extend(badpix_types_from_darks)
            # In this case we need to run the pipeline on all input files,
            # even if the rate file is present, because we also need the jump
            # and fitopt files, which are not saved by default
            for uncal_file, rate_file in zip(dark_raw_files, dark_slope_files):
                jump_output, rate_output, fitopt_output = pipeline_tools.calwebb_detector1_save_jump(uncal_file, self.data_dir,
                                                                                                     ramp_fit=True, save_fitopt=True)
                self.get_metadata(uncal_file)
                dark_jump_files.append(jump_output)
                dark_fitopt_files.append(fitopt_output)
                if self.nints > 1:
                    dark_slope_files[index] = rate_output.replace('0_ramp_fit', '1_ramp_fit')
                else:
                    dark_slope_files[index] = deepcopy(rate_output)
                dark_obstimes.append(instrument_properties.get_obstime(uncal_file))
                index += 1

            if len(all_files) == 0:
                all_files = deepcopy(dark_slope_files)
            else:
                all_files = all_files + dark_slope_files

            min_dark_time = min(dark_obstimes)
            max_dark_time = max(dark_obstimes)
            mid_dark_time = instrument_properties.mean_time(dark_obstimes)

        # For the dead flux check, filter out any files that have fewer than
        # 4 groups
        dead_flux_files = []
        if illuminated_raw_files:
            for illum_file in illuminated_raw_files:
                ngroup = fits.getheader(illum_file)['NGROUPS']
                if ngroup >= 4:
                    dead_flux_files.append(illum_file)
        if len(dead_flux_files) == 0:
            dead_flux_files = None

        # Instrument-specific preferences from jwst_reffiles meetings
        if self.instrument in ['nircam', 'niriss', 'fgs']:
            dead_search_type = 'sigma_rate'
        elif self.instrument in ['miri', 'nirspec']:
            dead_search_type = 'absolute_rate'
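
        # Per jwst_reffiles, 'sigma_rate' searches for dead pixels lying more
        # than a given number of standard deviations below the mean of the
        # normalized slope image, while 'absolute_rate' compares each pixel
        # against a fixed normalized-signal threshold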

        flat_mean_normalization_method = 'smoothed'

        # Call the bad pixel search module from jwst_reffiles. Lots of
        # other possible parameters. Only specify the non-default params
        # in order to make things easier to read.
        query_string = 'darks_{}_flats_{}_to_{}'.format(self.dark_query_start, self.flat_query_start, self.query_end)
        output_file = '{}_{}_{}_bpm.fits'.format(self.instrument, self.aperture, query_string)
        output_file = os.path.join(self.output_dir, output_file)
        bad_pixel_mask.bad_pixels(flat_slope_files=illuminated_slope_files, dead_search_type=dead_search_type,
                                  flat_mean_normalization_method=flat_mean_normalization_method,
                                  run_dead_flux_check=True, dead_flux_check_files=dead_flux_files, flux_check=35000,
                                  dark_slope_files=dark_slope_files, dark_uncal_files=dark_raw_files,
                                  dark_jump_files=dark_jump_files, dark_fitopt_files=dark_fitopt_files, plot=False,
                                  output_file=output_file, author='jwst_reffiles', description='A bad pix mask',
                                  pedigree='GROUND', useafter='2222-04-01 00:00:00',
                                  history='This file was created by JWQL', quality_check=False)

        # Read in the newly-created bad pixel file
        set_permissions(output_file)
        badpix_map = fits.getdata(output_file)

        # Locate and read in the current bad pixel mask
        parameters = self.make_crds_parameter_dict()
        mask_dictionary = crds_tools.get_reffiles(parameters, ['mask'], download=True)
        baseline_file = mask_dictionary['mask']

        if 'NOT FOUND' in baseline_file:
            logging.warning(('\tNo baseline bad pixel file for {} {}. Any bad '
                             'pixels found as part of this search will be considered new.'.format(self.instrument, self.aperture)))
            baseline_file = output_file
            yd, xd = badpix_map.shape
            baseline_badpix_mask = np.zeros((yd, xd), dtype=int)
        else:
            logging.info('\tBaseline bad pixel file is {}'.format(baseline_file))
            baseline_badpix_mask = fits.getdata(baseline_file)

        # Exclude hot and dead pixels in the current bad pixel mask
        new_since_reffile = exclude_crds_mask_pix(badpix_map, baseline_badpix_mask)

        # Create a list of the new instances of each type of bad pixel
        for bad_type in badpix_types:
            bad_location_list = bad_map_to_list(new_since_reffile, bad_type)

            # Add new hot and dead pixels to the database
            logging.info('\tFound {} new {} pixels'.format(len(bad_location_list[0]), bad_type))

            if bad_type in badpix_types_from_flats:
                self.add_bad_pix(bad_location_list, bad_type, illuminated_slope_files, min_illum_time, mid_illum_time, max_illum_time, baseline_file)
            elif bad_type in badpix_types_from_darks:
                self.add_bad_pix(bad_location_list, bad_type, dark_slope_files, min_dark_time, mid_dark_time, max_dark_time, baseline_file)
            else:
                raise ValueError("Unrecognized type of bad pixel: {}. Cannot update database table.".format(bad_type))
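
The four parallel lists expected by code example #2 could be assembled as in the hypothetical sketch below. The ``BadPixelMonitor`` class name and all filenames are placeholders; the key convention, visible in the loops above, is that the raw and slope lists are index-aligned and a missing slope file is marked with the string ``'None'``.

    # Hypothetical call with placeholder filenames, assuming the method above
    # lives on a class named BadPixelMonitor. Raw and slope lists must be the
    # same length; the string 'None' marks a raw file whose slope image does
    # not exist yet and must be produced by the pipeline.
    monitor = BadPixelMonitor()                    # placeholder class
    flat_raw = ['flat_01_uncal.fits', 'flat_02_uncal.fits']
    flat_rate = ['flat_01_rate.fits', 'None']      # second flat still needs ramp fitting
    dark_raw = ['dark_01_uncal.fits']
    dark_rate = ['None']                           # darks are reprocessed regardless
    monitor.process(flat_raw, flat_rate, dark_raw, dark_rate)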