Example #1
from pysiaf import Siaf

def get_aperture(instrname, apername):
    # siaf_path = os.path.join(bundle_dir, 'data', '{}_SIAF.xml'.format(instrname))
    # assert os.path.exists(siaf_path), 'no SIAF for {} at {}'.format(instrname, siaf_path)
    siaf = Siaf(instrument=instrname)
    return siaf[apername]
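
A minimal usage sketch for the helper above; the aperture name is illustrative, and pysiaf must be able to load the NIRCam SIAF:

ap = get_aperture('NIRCam', 'NRCA5_FULL')   # hypothetical aperture choice
print(ap.AperName, ap.V2Ref, ap.V3Ref)      # reference point in V2, V3 (arcsec)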
Example #2
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for readnoise_monitor\n')

        # Get the output directory and set up a directory to store the data
        self.output_dir = os.path.join(get_config()['outputs'],
                                       'readnoise_monitor')
        ensure_dir_exists(os.path.join(self.output_dir, 'data'))

        # Use the current time as the end time for the MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in ['nircam', 'niriss']:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures for this instrument
            siaf = Siaf(self.instrument)
            possible_apertures = list(siaf.apertures)

            for aperture in possible_apertures:

                logging.info('\nWorking on aperture {} in {}'.format(
                    aperture, instrument))
                self.aperture = aperture

                # Locate the record of the most recent MAST search; use this
                # time, minus a 30-day buffer to catch any files missed by the
                # previous run, as the start time of the new MAST search.
                most_recent_search = self.most_recent_search()
                self.query_start = most_recent_search - 30

                # Query MAST for new dark files for this instrument/aperture
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))
                new_entries = mast_query_darks(instrument, aperture,
                                               self.query_start,
                                               self.query_end)
                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Set up a directory to store the data for this aperture
                self.data_dir = os.path.join(
                    self.output_dir,
                    'data/{}_{}'.format(self.instrument.lower(),
                                        self.aperture.lower()))
                if len(new_entries) > 0:
                    ensure_dir_exists(self.data_dir)

                # Get any new files to process
                new_files = []
                checked_files = []
                for file_entry in new_entries:
                    output_filename = os.path.join(
                        self.data_dir,
                        file_entry['filename'].replace('_dark', '_uncal'))

                    # Sometimes both the dark and uncal names of a file are picked up in new_entries
                    if output_filename in checked_files:
                        logging.info(
                            '\t{} already checked in this run.'.format(
                                output_filename))
                        continue
                    checked_files.append(output_filename)

                    # Don't process files that already exist in the readnoise stats database
                    file_exists = self.file_exists_in_database(output_filename)
                    if file_exists:
                        logging.info(
                            '\t{} already exists in the readnoise database table.'
                            .format(output_filename))
                        continue

                    # Save any new uncal files with enough groups in the output directory; some don't exist in the JWQL filesystem
                    try:
                        filename = filesystem_path(file_entry['filename'])
                        uncal_filename = filename.replace('_dark', '_uncal')
                        if not os.path.isfile(uncal_filename):
                            logging.info(
                                '\t{} does not exist in JWQL filesystem, even though {} does'
                                .format(uncal_filename, filename))
                        else:
                            num_groups = fits.getheader(
                                uncal_filename)['NGROUPS']
                            if num_groups > 1:  # skip processing if the file doesn't have enough groups to calculate the readnoise; TODO change to 10 before incorporating MIRI
                                shutil.copy(uncal_filename, self.data_dir)
                                logging.info('\tCopied {} to {}'.format(
                                    uncal_filename, output_filename))
                                set_permissions(output_filename)
                                new_files.append(output_filename)
                            else:
                                logging.info(
                                    '\tNot enough groups to calculate readnoise in {}'
                                    .format(uncal_filename))
                    except FileNotFoundError:
                        logging.info(
                            '\t{} does not exist in JWQL filesystem'.format(
                                file_entry['filename']))

                # Run the readnoise monitor on any new files
                if len(new_files) > 0:
                    self.process(new_files)
                    monitor_run = True
                else:
                    logging.info(
                        '\tReadnoise monitor skipped. {} new dark files for {}, {}.'
                        .format(len(new_files), instrument, aperture))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'entries_found': len(new_entries),
                    'files_found': len(new_files),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Readnoise Monitor completed successfully.')
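
The query window used above is plain MJD arithmetic: the start time is the most recent recorded search minus a 30-day buffer, and the end time is the current time. A self-contained sketch with a made-up last-search date:

from astropy.time import Time

most_recent_search = 59000.0            # hypothetical MJD of the last search
query_start = most_recent_search - 30   # 30-day buffer (MJD is in days)
query_end = Time.now().mjd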
Example #3

def compute_v2v3_offset(aperture_a, aperture_b):
    '''
    For the same pixel coordinates, different V2, V3 coordinates are used
    depending on whether the coronagraph pupil wheel wedge is in the beam.
    The offset is computed by transforming the same pixel (Det) coordinates
    to V2, V3 in two different apertures and computing the difference in
    the resulting Tel frame coordinates.
    '''
    x_a, y_a = aperture_a.det_to_tel(aperture_b.XDetRef, aperture_b.YDetRef)
    x_b, y_b = aperture_b.det_to_tel(aperture_b.XDetRef, aperture_b.YDetRef)
    return x_a - x_b, y_a - y_b


_NIRCAM_SIAF = Siaf('NIRCam')
_MIRI_SIAF = Siaf('MIRI')

_NIRCAM_CORON_OFFSET_TEL = compute_v2v3_offset(
    _NIRCAM_SIAF['NRCA5_MASKLWB'],
    _NIRCAM_SIAF['NRCA5_FULL']
)
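
# The same offset computation applies to MIRI, whose coronagraphic apertures
# also carry their own V2, V3 solutions. A sketch under the assumption that
# these aperture names exist in the installed MIRI SIAF; they are illustrative,
# not taken from the original module (check list(_MIRI_SIAF.apertures) first).
_MIRI_CORON_OFFSET_TEL = compute_v2v3_offset(
    _MIRI_SIAF['MIRIM_MASK1065'],
    _MIRI_SIAF['MIRIM_FULL']
)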

# These bad areas were defined in raw detector pixel coordinates by
# John Stansberry using a backlit image of NIRCam A5 through the
# long-wavelength bar coronagraph pupil wedge. Colin Cox translated them to
# V2, V3 for A5

NIRCAM_CORON_BAD_AREAS = np.array(
       [[[ 56.525, -462.2092],
        [  56.5505, -456.9293],
Example #4
    def run(self):
        """The main method.  See module docstrings for further details."""

        logging.info('Begin logging for bias_monitor')

        # Get the output directory and set up a directory to store the data
        self.output_dir = os.path.join(get_config()['outputs'], 'bias_monitor')
        ensure_dir_exists(os.path.join(self.output_dir, 'data'))

        # Use the current time as the end time for the MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in ['nircam', 'niriss', 'nirspec']:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible full-frame apertures for this instrument
            siaf = Siaf(self.instrument)
            possible_apertures = [
                aperture for aperture in siaf.apertures
                if siaf[aperture].AperType == 'FULLSCA'
            ]

            for aperture in possible_apertures:

                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))
                self.aperture = aperture

                # Locate the record of the most recent MAST search; use this
                # time, minus a 30-day buffer to catch any files missed by the
                # previous run, as the start time of the new MAST search.
                most_recent_search = self.most_recent_search()
                self.query_start = most_recent_search - 30

                # Query MAST for new dark files for this instrument/aperture
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))
                new_entries = monitor_utils.mast_query_darks(
                    instrument, aperture, self.query_start, self.query_end)

                # Exclude ASIC tuning data
                len_new_darks = len(new_entries)
                new_entries = monitor_utils.exclude_asic_tuning(new_entries)
                len_no_asic = len(new_entries)
                num_asic = len_new_darks - len_no_asic
                logging.info(
                    "\tFiltering out ASIC tuning files removed {} dark files.".
                    format(num_asic))

                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Set up a directory to store the data for this aperture
                self.data_dir = os.path.join(
                    self.output_dir,
                    'data/{}_{}'.format(self.instrument.lower(),
                                        self.aperture.lower()))
                if len(new_entries) > 0:
                    ensure_dir_exists(self.data_dir)

                # Get any new files to process
                new_files = []
                for file_entry in new_entries:
                    output_filename = os.path.join(self.data_dir,
                                                   file_entry['filename'])
                    output_filename = output_filename.replace(
                        '_uncal.fits', '_uncal_0thgroup.fits').replace(
                            '_dark.fits', '_uncal_0thgroup.fits')

                    # Don't process files that already exist in the bias stats database
                    file_exists = self.file_exists_in_database(output_filename)
                    if file_exists:
                        logging.info(
                            '\t{} already exists in the bias database table.'.
                            format(output_filename))
                        continue

                    # Save the 0th group image from each new file in the output directory; some don't exist in the JWQL filesystem.
                    try:
                        filename = filesystem_path(file_entry['filename'])
                        uncal_filename = filename.replace('_dark', '_uncal')
                        if not os.path.isfile(uncal_filename):
                            logging.info(
                                '\t{} does not exist in JWQL filesystem, even though {} does'
                                .format(uncal_filename, filename))
                        else:
                            new_file = self.extract_zeroth_group(
                                uncal_filename)
                            new_files.append(new_file)
                    except FileNotFoundError:
                        logging.info(
                            '\t{} does not exist in JWQL filesystem'.format(
                                file_entry['filename']))

                # Run the bias monitor on any new files
                if len(new_files) > 0:
                    self.process(new_files)
                    monitor_run = True
                else:
                    logging.info(
                        '\tBias monitor skipped. {} new dark files for {}, {}.'
                        .format(len(new_files), instrument, aperture))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'entries_found': len(new_entries),
                    'files_found': len(new_files),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Bias Monitor completed successfully.')
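
The output-filename rewriting above maps either a _uncal.fits or a _dark.fits input onto a _uncal_0thgroup.fits product. A quick sketch with a made-up filename:

name = 'jw01234001001_02101_00001_nrca1_dark.fits'   # hypothetical input
out = name.replace('_uncal.fits', '_uncal_0thgroup.fits').replace(
    '_dark.fits', '_uncal_0thgroup.fits')
assert out.endswith('_uncal_0thgroup.fits')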
Example #5
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for dark_monitor')

        apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL']

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for the MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = list(Siaf(instrument).apernames)
            possible_apertures = [
                ap for ap in possible_apertures if ap not in apertures_to_skip
            ]

            for aperture in possible_apertures:
                logging.info('')
                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))

                # Find the appropriate threshold for the number of new files needed
                match = aperture == limits['Aperture']
                file_count_threshold = limits['Threshold'][match][0]

                # Locate the record of the most recent MAST search
                self.aperture = aperture
                self.query_start = self.most_recent_search()
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))

                # Query MAST using the aperture and the time of the
                # most recent previous search as the starting time
                new_entries = mast_query_darks(instrument, aperture,
                                               self.query_start,
                                               self.query_end)

                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Check to see if there are enough new files to meet the
                # monitor's signal-to-noise requirements
                if len(new_entries) >= file_count_threshold:
                    logging.info(
                        '\tSufficient new dark files found for {}, {} to run the dark monitor.'
                        .format(self.instrument, self.aperture))

                    # Get full paths to the files
                    new_filenames = []
                    for file_entry in new_entries:
                        try:
                            new_filenames.append(
                                filesystem_path(file_entry['filename']))
                        except FileNotFoundError:
                            logging.warning(
                                '\t\tUnable to locate {} in filesystem. Not including in processing.'
                                .format(file_entry['filename']))

                    # Set up directories for the copied data
                    ensure_dir_exists(os.path.join(self.output_dir, 'data'))
                    self.data_dir = os.path.join(
                        self.output_dir,
                        'data/{}_{}'.format(self.instrument.lower(),
                                            self.aperture.lower()))
                    ensure_dir_exists(self.data_dir)

                    # Copy files from filesystem
                    dark_files, not_copied = copy_files(
                        new_filenames, self.data_dir)

                    logging.info('\tNew_filenames: {}'.format(new_filenames))
                    logging.info('\tData dir: {}'.format(self.data_dir))
                    logging.info(
                        '\tCopied to working dir: {}'.format(dark_files))
                    logging.info('\tNot copied: {}'.format(not_copied))

                    # Run the dark monitor
                    self.process(dark_files)
                    monitor_run = True

                else:
                    logging.info((
                        '\tDark monitor skipped. {} new dark files for {}, {}. {} new files are '
                        'required to run the dark current monitor.').format(
                            len(new_entries), instrument, aperture,
                            file_count_threshold))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'files_found': len(new_entries),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')
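
The threshold lookup reads an astropy ascii table with Aperture and Threshold columns and picks the row for the current aperture. A minimal sketch with an inline, made-up table standing in for THRESHOLDS_FILE:

from astropy.io import ascii

limits = ascii.read(['Aperture Threshold',     # hypothetical two-row table
                     'NRCA1_FULL 10',
                     'NRCB1_FULL 10'])
match = 'NRCA1_FULL' == limits['Aperture']     # boolean mask over the rows
file_count_threshold = limits['Threshold'][match][0]   # -> 10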
Example #6
    def process(self, file_list):
        """The main method for processing darks.  See module docstrings
        for further details.

        Parameters
        ----------
        file_list : list
            List of filenames (including full paths) to the dark current
            files
        """

        # Basic metadata that will be needed later
        self.get_metadata(file_list[0])

        # Determine which pipeline steps need to be executed
        required_steps = pipeline_tools.get_pipeline_steps(self.instrument)
        logging.info(
            '\tRequired calwebb_detector1 pipeline steps to have the data in the '
            'correct format:')
        for item in required_steps:
            logging.info('\t\t{}: {}'.format(item, required_steps[item]))

        # Modify the list of pipeline steps to skip those not needed for the
        # preparation of dark current data
        required_steps['dark_current'] = False
        required_steps['persistence'] = False

        # The NIRSpec IRS^2 readout pattern NRSIRS2 is the only one whose
        # nframes is not a power of 2
        if self.read_pattern not in pipeline_tools.GROUPSCALE_READOUT_PATTERNS:
            required_steps['group_scale'] = False

        # Run pipeline steps on files, generating slope files
        slope_files = []
        for filename in file_list:

            completed_steps = pipeline_tools.completed_pipeline_steps(filename)
            steps_to_run = pipeline_tools.steps_to_run(required_steps,
                                                       completed_steps)

            logging.info('\tWorking on file: {}'.format(filename))
            logging.info('\tPipeline steps that remain to be run:')
            for item in steps_to_run:
                logging.info('\t\t{}: {}'.format(item, steps_to_run[item]))

            # Run any remaining required pipeline steps
            if not any(steps_to_run.values()):
                slope_files.append(filename)
            else:
                processed_file = filename.replace('.fits', '_rate.fits')

                # If the slope file already exists, skip the pipeline call
                if not os.path.isfile(processed_file):
                    logging.info('\tRunning pipeline on {}'.format(filename))
                    processed_file = pipeline_tools.run_calwebb_detector1_steps(
                        os.path.abspath(filename), steps_to_run)
                    logging.info('\tPipeline complete. Output: {}'.format(
                        processed_file))

                else:
                    logging.info(
                        '\tSlope file {} already exists. Skipping call to pipeline.'
                        .format(processed_file))

                slope_files.append(processed_file)

                # Delete the original dark ramp file to save disk space
                os.remove(filename)

        obs_times = []
        logging.info(
            '\tSlope images to use in the dark monitor for {}, {}:'.format(
                self.instrument, self.aperture))
        for item in slope_files:
            logging.info('\t\t{}'.format(item))
            # Get the observation time for each file
            obstime = instrument_properties.get_obstime(item)
            obs_times.append(obstime)

        # Find the earliest and latest observation time, and calculate
        # the mid-time.
        min_time = np.min(obs_times)
        max_time = np.max(obs_times)
        mid_time = instrument_properties.mean_time(obs_times)

        # Read in all slope images and place into a list
        slope_image_stack, slope_exptimes = pipeline_tools.image_stack(
            slope_files)

        # Calculate a mean slope image from the inputs
        slope_image, stdev_image = calculations.mean_image(slope_image_stack,
                                                           sigma_threshold=3)
        mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image,
                                                     slope_files)
        logging.info(
            '\tSigma-clipped mean of the slope images saved to: {}'.format(
                mean_slope_file))

        # ----- Search for new hot/dead/noisy pixels -----
        # Read in baseline mean slope image and stdev image
        # The baseline image is used to look for hot/dead/noisy pixels,
        # but not for comparing mean dark rates. Therefore, updates to
        # the baseline can be minimal.

        # Limit checks for hot/dead/noisy pixels to full frame data since
        # subarray data have much shorter exposure times and therefore lower
        # signal-to-noise
        aperture_type = Siaf(self.instrument)[self.aperture].AperType
        if aperture_type == 'FULLSCA':
            baseline_file = self.get_baseline_filename()
            if baseline_file is None:
                logging.warning((
                    '\tNo baseline dark current countrate image for {} {}. Setting the '
                    'current mean slope image to be the new baseline.'.format(
                        self.instrument, self.aperture)))
                baseline_file = mean_slope_file
                baseline_mean = deepcopy(slope_image)
                baseline_stdev = deepcopy(stdev_image)
            else:
                logging.info('\tBaseline file is {}'.format(baseline_file))
                baseline_mean, baseline_stdev = self.read_baseline_slope_image(
                    baseline_file)

            # Check the hot/dead pixel population for changes
            new_hot_pix, new_dead_pix = self.find_hot_dead_pixels(
                slope_image, baseline_mean)

            # Shift the coordinates to be in full frame coordinate system
            new_hot_pix = self.shift_to_full_frame(new_hot_pix)
            new_dead_pix = self.shift_to_full_frame(new_dead_pix)

            # Exclude hot and dead pixels found previously
            new_hot_pix = self.exclude_existing_badpix(new_hot_pix, 'hot')
            new_dead_pix = self.exclude_existing_badpix(new_dead_pix, 'dead')

            # Add new hot and dead pixels to the database
            logging.info('\tFound {} new hot pixels'.format(len(
                new_hot_pix[0])))
            logging.info('\tFound {} new dead pixels'.format(
                len(new_dead_pix[0])))
            self.add_bad_pix(new_hot_pix, 'hot', file_list, mean_slope_file,
                             baseline_file, min_time, mid_time, max_time)
            self.add_bad_pix(new_dead_pix, 'dead', file_list, mean_slope_file,
                             baseline_file, min_time, mid_time, max_time)

            # Check for any pixels that are significantly more noisy than
            # in the baseline stdev image
            new_noisy_pixels = self.noise_check(stdev_image, baseline_stdev)

            # Shift coordinates to be in full_frame coordinate system
            new_noisy_pixels = self.shift_to_full_frame(new_noisy_pixels)

            # Exclude previously found noisy pixels
            new_noisy_pixels = self.exclude_existing_badpix(
                new_noisy_pixels, 'noisy')

            # Add new noisy pixels to the database
            logging.info('\tFound {} new noisy pixels'.format(
                len(new_noisy_pixels[0])))
            self.add_bad_pix(new_noisy_pixels, 'noisy', file_list,
                             mean_slope_file, baseline_file, min_time,
                             mid_time, max_time)

        # ----- Calculate image statistics -----

        # Find amplifier boundaries so per-amp statistics can be calculated
        number_of_amps, amp_bounds = instrument_properties.amplifier_info(
            slope_files[0])
        logging.info('\tAmplifier boundaries: {}'.format(amp_bounds))

        # Calculate mean and stdev values, and fit a Gaussian to the
        # histogram of the pixels in each amp
        (amp_mean, amp_stdev, gauss_param, gauss_chisquared,
         double_gauss_params, double_gauss_chisquared, histogram,
         bins) = self.stats_by_amp(slope_image, amp_bounds)

        # Construct new entry for dark database table
        source_files = [os.path.basename(item) for item in file_list]
        for key in amp_mean.keys():
            dark_db_entry = {
                'aperture': self.aperture,
                'amplifier': key,
                'mean': amp_mean[key],
                'stdev': amp_stdev[key],
                'source_files': source_files,
                'obs_start_time': min_time,
                'obs_mid_time': mid_time,
                'obs_end_time': max_time,
                'gauss_amplitude': list(gauss_param[key][0]),
                'gauss_peak': list(gauss_param[key][1]),
                'gauss_width': list(gauss_param[key][2]),
                'gauss_chisq': gauss_chisquared[key],
                'double_gauss_amplitude1': double_gauss_params[key][0],
                'double_gauss_peak1': double_gauss_params[key][1],
                'double_gauss_width1': double_gauss_params[key][2],
                'double_gauss_amplitude2': double_gauss_params[key][3],
                'double_gauss_peak2': double_gauss_params[key][4],
                'double_gauss_width2': double_gauss_params[key][5],
                'double_gauss_chisq': double_gauss_chisquared[key],
                'mean_dark_image_file': os.path.basename(mean_slope_file),
                'hist_dark_values': bins,
                'hist_amplitudes': histogram,
                'entry_date': datetime.datetime.now()
            }
            self.stats_table.__table__.insert().execute(dark_db_entry)
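
calculations.mean_image is a jwql internal; conceptually it computes a per-pixel sigma-clipped mean and standard deviation over the stack of slope images. A standalone sketch of that idea using astropy's sigma_clip (not the monitor's actual implementation):

import numpy as np
from astropy.stats import sigma_clip

stack = np.random.normal(0.01, 0.002, size=(5, 32, 32))   # synthetic (n, y, x) stack
clipped = sigma_clip(stack, sigma=3, axis=0)               # masked array
slope_image = np.ma.mean(clipped, axis=0).filled(np.nan)   # per-pixel mean
stdev_image = np.ma.std(clipped, axis=0).filled(np.nan)    # per-pixel stdev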
Example #7
"""Telescope coordinate information"""

import numpy as np
import logging
_log = logging.getLogger('pynrc')

__epsilon = np.finfo(float).eps

import pysiaf
from pysiaf import JWST_PRD_VERSION, rotations, Siaf
# Create this once, since constructing a Siaf object is slow
siaf_nrc = Siaf('NIRCam')
siaf_nrc.generate_toc()

# Functions transferred to webbpsf_ext
from webbpsf_ext.coords import dist_image
from webbpsf_ext.coords import xy_to_rtheta, rtheta_to_xy, xy_rot
from webbpsf_ext.coords import ap_radec, radec_to_v2v3, v2v3_to_pixel
from webbpsf_ext.coords import get_NRC_v2v3_limits, NIRCam_V2V3_limits
from webbpsf_ext.coords import get_NRC_v2v3_limits as get_v2v3_limits # Original name
from webbpsf_ext.coords import plotAxes

# Newer additions from webbpsf_ext
from webbpsf_ext.coords import gen_sgd_offsets, get_idl_offset, radec_offset
from webbpsf_ext.coords import jwst_point

###########################################################################
#
#    NIRCam Coordinate Systems
#
###########################################################################
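
# Usage sketch for the cached `siaf_nrc` object above: aperture lookups are
# dictionary-style and reuse the single instance rather than re-parsing the
# SIAF XML. The aperture name here is illustrative.
ap = siaf_nrc['NRCA5_FULL']
_log.info('{}: {} x {} science pixels'.format(ap.AperName, ap.XSciSize, ap.YSciSize))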
Example #8
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for dark_monitor')

        apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL']

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for the MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = list(Siaf(instrument).apernames)
            possible_apertures = [
                ap for ap in possible_apertures if ap not in apertures_to_skip
            ]

            # Get a list of all possible readout patterns for this instrument
            possible_readpatts = RAPID_READPATTERNS[instrument]

            for aperture in possible_apertures:
                logging.info('')
                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))

                # Find the appropriate threshold for the number of new files needed
                match = aperture == limits['Aperture']

                # If the aperture is not listed in the threshold file, we need
                # a default
                if not np.any(match):
                    file_count_threshold = 30
                    logging.warning((
                        '\tAperture {} is not present in the threshold file. Continuing '
                        'with the default threshold of 30 files.'.format(
                            aperture)))
                else:
                    file_count_threshold = limits['Threshold'][match][0]
                self.aperture = aperture

                # We need a separate search for each readout pattern
                for readpatt in possible_readpatts:
                    self.readpatt = readpatt
                    logging.info('\tWorking on readout pattern: {}'.format(
                        self.readpatt))

                    # Locate the record of the most recent MAST search
                    self.query_start = self.most_recent_search()
                    logging.info('\tQuery times: {} {}'.format(
                        self.query_start, self.query_end))

                    # Query MAST using the aperture and the time of the
                    # most recent previous search as the starting time
                    new_entries = mast_query_darks(instrument,
                                                   aperture,
                                                   self.query_start,
                                                   self.query_end,
                                                   readpatt=self.readpatt)
                    logging.info(
                        '\tAperture: {}, Readpattern: {}, new entries: {}'.
                        format(self.aperture, self.readpatt, len(new_entries)))

                    # Check to see if there are enough new files to meet the
                    # monitor's signal-to-noise requirements
                    if len(new_entries) >= file_count_threshold:
                        logging.info(
                            '\tMAST query has returned sufficient new dark files for {}, {}, {} to run the dark monitor.'
                            .format(self.instrument, self.aperture,
                                    self.readpatt))

                        # Get full paths to the files
                        new_filenames = []
                        for file_entry in new_entries:
                            try:
                                new_filenames.append(
                                    filesystem_path(file_entry['filename']))
                            except FileNotFoundError:
                                logging.warning(
                                    '\t\tUnable to locate {} in filesystem. Not including in processing.'
                                    .format(file_entry['filename']))

                        # In some (unusual) cases, there are files in MAST with the correct aperture name
                        # but incorrect array sizes. Make sure that the new files all have the expected
                        # aperture size
                        temp_filenames = []
                        bad_size_filenames = []
                        expected_ap = Siaf(instrument)[aperture]
                        expected_xsize = expected_ap.XSciSize
                        expected_ysize = expected_ap.YSciSize
                        for new_file in new_filenames:
                            with fits.open(new_file) as hdulist:
                                xsize = hdulist[0].header['SUBSIZE1']
                                ysize = hdulist[0].header['SUBSIZE2']
                            if xsize == expected_xsize and ysize == expected_ysize:
                                temp_filenames.append(new_file)
                            else:
                                bad_size_filenames.append(new_file)
                        if len(temp_filenames) != len(new_filenames):
                            logging.info(
                                '\tSome files returned by MAST have unexpected aperture sizes. These files will be ignored: '
                            )
                            for badfile in bad_size_filenames:
                                logging.info('\t\t{}'.format(badfile))
                        new_filenames = deepcopy(temp_filenames)

                        # If it turns out that the monitor doesn't find enough
                        # of the files returned by the MAST query to meet the threshold,
                        # then the monitor will not be run
                        if len(new_filenames) < file_count_threshold:
                            logging.info((
                                "\tFilesystem search for the files identified by MAST has returned {} files. "
                                "This is less than the required minimum number of files ({}) necessary to run "
                                "the monitor. Quitting.").format(
                                    len(new_filenames), file_count_threshold))
                            monitor_run = False
                        else:
                            logging.info((
                                "\tFilesystem search for the files identified by MAST has returned {} files."
                            ).format(len(new_filenames)))
                            monitor_run = True

                        if monitor_run:
                            # Set up directories for the copied data
                            ensure_dir_exists(
                                os.path.join(self.output_dir, 'data'))
                            self.data_dir = os.path.join(
                                self.output_dir,
                                'data/{}_{}'.format(self.instrument.lower(),
                                                    self.aperture.lower()))
                            ensure_dir_exists(self.data_dir)

                            # Copy files from filesystem
                            dark_files, not_copied = copy_files(
                                new_filenames, self.data_dir)

                            logging.info(
                                '\tNew_filenames: {}'.format(new_filenames))
                            logging.info('\tData dir: {}'.format(
                                self.data_dir))
                            logging.info('\tCopied to working dir: {}'.format(
                                dark_files))
                            logging.info('\tNot copied: {}'.format(not_copied))

                            # Run the dark monitor
                            self.process(dark_files)

                    else:
                        logging.info((
                            '\tDark monitor skipped. MAST query has returned {} new dark files for '
                            '{}, {}, {}. {} new files are required to run the dark current monitor.'
                        ).format(len(new_entries), instrument, aperture,
                                 self.readpatt, file_count_threshold))
                        monitor_run = False

                    # Update the query history
                    new_entry = {
                        'instrument': instrument,
                        'aperture': aperture,
                        'readpattern': self.readpatt,
                        'start_time_mjd': self.query_start,
                        'end_time_mjd': self.query_end,
                        'files_found': len(new_entries),
                        'run_monitor': monitor_run,
                        'entry_date': datetime.datetime.now()
                    }
                    self.query_table.__table__.insert().execute(new_entry)
                    logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')
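
A side note on self.query_table.__table__.insert().execute(new_entry): it uses SQLAlchemy's legacy implicit-execution API, which was removed in SQLAlchemy 2.0. A sketch of an equivalent explicit insert, assuming an `engine` object and the declarative model behind query_table (here called QueryTable, a stand-in name) are available:

from sqlalchemy import insert

with engine.begin() as conn:   # `engine` is assumed; commits on exit
    conn.execute(insert(QueryTable.__table__).values(**new_entry))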