Example #1
File: views.py Project: cracraft/jwql
def archived_proposals_ajax(request, inst):
    """Generate the page listing all archived proposals in the database

    Parameters
    ----------
    request : HttpRequest object
        Incoming request from the webpage
    inst : str
        Name of JWST instrument

    Returns
    -------
    JsonResponse object
        Outgoing response sent to the webpage
    """
    # Ensure the instrument is correctly capitalized
    inst = JWST_INSTRUMENT_NAMES_MIXEDCASE[inst.lower()]

    # Get list of all files for the given instrument
    filenames_public = get_filenames_by_instrument(inst, restriction='public')
    filenames_proprietary = get_filenames_by_instrument(
        inst, restriction='proprietary')

    # Determine the locations of the files
    filenames = []
    for filename in filenames_public:
        try:
            relative_filepath = filesystem_path(filename,
                                                check_existence=False)
            full_filepath = os.path.join(FILESYSTEM_DIR, 'public',
                                         relative_filepath)
            filenames.append(full_filepath)
        except ValueError:
            print('Unable to determine filepath for {}'.format(filename))
    for filename in filenames_proprietary:
        try:
            relative_filepath = filesystem_path(filename,
                                                check_existence=False)
            full_filepath = os.path.join(FILESYSTEM_DIR, 'proprietary',
                                         relative_filepath)
            filenames.append(full_filepath)
        except ValueError:
            print('Unable to determine filepath for {}'.format(filename))

    # Gather information about the proposals for the given instrument
    proposal_info = get_proposal_info(filenames)

    context = {
        'inst': inst,
        'all_filenames': filenames,
        'num_proposals': proposal_info['num_proposals'],
        'thumbnails': {
            'proposals': proposal_info['proposals'],
            'thumbnail_paths': proposal_info['thumbnail_paths'],
            'num_files': proposal_info['num_files']
        }
    }

    return JsonResponse(context, json_dumps_params={'indent': 2})
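
A minimal sketch of how this view could be exposed through Django's URL configuration; the URL pattern and route name below are hypothetical illustrations, not taken from jwql.

# urls.py -- hypothetical wiring for the AJAX view above; the pattern
# and route name are illustrative assumptions.
from django.urls import path

from . import views

urlpatterns = [
    path('ajax/<str:inst>/archive/', views.archived_proposals_ajax,
         name='archived_proposals_ajax'),
]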
Example #2
def locate_uncal_files(query_result):
    """Given a MAST query result, locate the raw version
    (``uncal.fits``) of the listed files in the filesystem.

    Parameters
    ----------
    query_result : list
        MAST query results. List of dictionaries

    Returns
    -------
    uncal_files : list
        List of raw file locations within the filesystem
    """
    uncal_files = []
    for entry in query_result:
        filename = entry['filename']
        suffix = filename.split('_')[-1].replace('.fits', '')
        uncal_file = filename.replace(suffix, 'uncal')

        # Look for uncal file
        try:
            uncal_files.append(filesystem_path(uncal_file))
        except FileNotFoundError:
            logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.'
                            .format(uncal_file))
    return uncal_files
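
A hypothetical call, assuming a query result of the documented shape (a list of dictionaries with at least a ``filename`` key); the filenames below are made up.

# Hypothetical usage; the filenames are illustrative only.
query_result = [
    {'filename': 'jw01234001001_02101_00001_nrca1_rate.fits'},
    {'filename': 'jw01234001001_02101_00002_nrca1_cal.fits'},
]
uncal_files = locate_uncal_files(query_result)
# uncal_files holds filesystem paths for the *_uncal.fits versions that
# were actually found; missing ones are logged and skipped.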
Example #3
def test_filesystem_path():
    """Test that a file's location in the filesystem is returned"""

    filename = 'jw96003001001_02201_00001_nrca1_dark.fits'
    check = filesystem_path(filename)
    location = os.path.join(get_config()['filesystem'], 'jw96003', filename)

    assert check == location
Example #4
def locate_rate_files(uncal_files):
    """Given a list of uncal (raw) files, generate a list of
    corresponding rate files. For each uncal file, if the rate file
    is present in the filesystem, add the name of the rate file (if
    a rateints file exists, use that) to the list of files. If no
    rate file is present, add ``None`` to the list.

    Parameters
    ----------
    uncal_files : list
        List of uncal files to use as the basis of the search

    Returns
    -------
    rate_files : list
        List of rate files. This list corresponds 1-to-1 with
        ``uncal_files``. Any missing rate files are listed as None.

    rate_files_to_copy : list
        Same as ``rate_files`` but without the None entries. This is
        a list of only the rate files that exist in the filesystem
    """
    if uncal_files is None:
        return None, None

    rate_files = []
    rate_files_to_copy = []
    for uncal in uncal_files:
        base = uncal.split('_uncal.fits')[0]
        constructed_ratefile = '{}_rateints.fits'.format(base)
        try:
            ratefile_path = filesystem_path(constructed_ratefile)
            rate_files.append(ratefile_path)
            rate_files_to_copy.append(ratefile_path)
        except FileNotFoundError:
            constructed_ratefile = '{}_rate.fits'.format(base)
            try:
                ratefile_path = filesystem_path(constructed_ratefile)
                rate_files.append(ratefile_path)
                rate_files_to_copy.append(ratefile_path)
            except FileNotFoundError:
                # No rate or rateints file exists; keep the 1-to-1
                # correspondence with the uncal list by recording None
                rate_files.append(None)
    return rate_files, rate_files_to_copy
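
A hypothetical call, chaining this with ``locate_uncal_files`` above; the path is made up.

# Hypothetical usage; the uncal path is illustrative only.
uncal_files = ['/filesystem/jw01234/jw01234001001_02101_00001_nrca1_uncal.fits']
rate_files, rate_files_to_copy = locate_rate_files(uncal_files)
# rate_files matches uncal_files one-to-one (None where no rate or
# rateints file exists); rate_files_to_copy lists only the files found.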
Example #5
    def most_recent_coords(self, bad_pixel_type):
        """Return the coordinates of the bad pixels in the most recent
        database entry for the given bad pixel type

        Parameters
        ----------
        bad_pixel_type : str
            The flavor of bad pixel (e.g. 'hot')

        Returns
        -------
        coords : tup
            Tuple containing a list of x coordinates and a list of y
            coordinates
        """
        # Find all the rows corresponding to the requested type of bad pixel
        rows = [
            row for row in self.bad_pixel_table if row.type == bad_pixel_type
        ]

        # Extract dates, number of bad pixels, and files used from each entry
        dates = [row.obs_mid_time for row in rows]
        coords = [row.coordinates for row in rows]
        files = [row.source_files[0] for row in rows]

        # If there are no valid entries in the database, return (None, None)
        if len(dates) == 0:
            return None, None

        # Sort by date to make sure everything is in chronological order
        chrono = np.argsort(dates)
        dates = [dates[i] for i in chrono]
        coords = [coords[i] for i in chrono]
        files = [files[i] for i in chrono]

        # Keep track of the latest timestamp
        self.last_timestamp = dates[-1].isoformat()

        # Grab the name of one of the files used when these bad pixels
        # were identified. We'll use this as an image on top of which
        # the bad pixels will be noted. Note that these should be
        # slope files
        self.image_file = filesystem_path(files[-1])

        # Return the list of coordinates for the most recent entry
        return coords[-1]
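
A hypothetical call from elsewhere in the same monitor class; ``monitor`` stands in for an instance of the (unshown) class that owns this method, and 'hot' is the example flavor from the docstring.

# Hypothetical usage; 'monitor' is an assumed instance name.
x_coords, y_coords = monitor.most_recent_coords('hot')
if x_coords is not None:
    print('{} hot pixels in the most recent entry'.format(len(x_coords)))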
Example #6
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for readnoise_monitor\n')

        # Get the output directory and setup a directory to store the data
        self.output_dir = os.path.join(get_config()['outputs'],
                                       'readnoise_monitor')
        ensure_dir_exists(os.path.join(self.output_dir, 'data'))

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in ['nircam', 'niriss']:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures for this instrument
            siaf = Siaf(self.instrument)
            possible_apertures = list(siaf.apertures)

            for aperture in possible_apertures:

                logging.info('\nWorking on aperture {} in {}'.format(
                    aperture, instrument))
                self.aperture = aperture

                # Locate the record of the most recent MAST search; use this time
                # (plus a 30 day buffer to catch any missing files from the previous
                # run) as the start time in the new MAST search.
                most_recent_search = self.most_recent_search()
                self.query_start = most_recent_search - 30

                # Query MAST for new dark files for this instrument/aperture
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))
                new_entries = mast_query_darks(instrument, aperture,
                                               self.query_start,
                                               self.query_end)
                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Set up a directory to store the data for this aperture
                self.data_dir = os.path.join(
                    self.output_dir,
                    'data/{}_{}'.format(self.instrument.lower(),
                                        self.aperture.lower()))
                if len(new_entries) > 0:
                    ensure_dir_exists(self.data_dir)

                # Get any new files to process
                new_files = []
                checked_files = []
                for file_entry in new_entries:
                    output_filename = os.path.join(
                        self.data_dir,
                        file_entry['filename'].replace('_dark', '_uncal'))

                    # Sometimes both the dark and uncal names of a file are picked up in new_entries
                    if output_filename in checked_files:
                        logging.info(
                            '\t{} already checked in this run.'.format(
                                output_filename))
                        continue
                    checked_files.append(output_filename)

                    # Don't process files that already exist in the readnoise stats database
                    file_exists = self.file_exists_in_database(output_filename)
                    if file_exists:
                        logging.info(
                            '\t{} already exists in the readnoise database table.'
                            .format(output_filename))
                        continue

                    # Save any new uncal files with enough groups in the output directory; some don't exist in the JWQL filesystem
                    try:
                        filename = filesystem_path(file_entry['filename'])
                        uncal_filename = filename.replace('_dark', '_uncal')
                        if not os.path.isfile(uncal_filename):
                            logging.info(
                                '\t{} does not exist in JWQL filesystem, even though {} does'
                                .format(uncal_filename, filename))
                        else:
                            num_groups = fits.getheader(
                                uncal_filename)['NGROUPS']
                            if num_groups > 1:  # skip processing if the file doesn't have enough groups to calculate the readnoise; TODO change to 10 before incorporating MIRI
                                shutil.copy(uncal_filename, self.data_dir)
                                logging.info('\tCopied {} to {}'.format(
                                    uncal_filename, output_filename))
                                set_permissions(output_filename)
                                new_files.append(output_filename)
                            else:
                                logging.info(
                                    '\tNot enough groups to calculate readnoise in {}'
                                    .format(uncal_filename))
                    except FileNotFoundError:
                        logging.info(
                            '\t{} does not exist in JWQL filesystem'.format(
                                file_entry['filename']))

                # Run the readnoise monitor on any new files
                if len(new_files) > 0:
                    self.process(new_files)
                    monitor_run = True
                else:
                    logging.info(
                        '\tReadnoise monitor skipped. {} new dark files for {}, {}.'
                        .format(len(new_files), instrument, aperture))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'entries_found': len(new_entries),
                    'files_found': len(new_files),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Readnoise Monitor completed successfully.')
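
A minimal sketch of invoking this monitor, assuming the method belongs to a class named ``Readnoise`` (the enclosing class is not shown in this example) and that the monitor runs as a standalone script.

# Hypothetical invocation; the class name is assumed from context.
if __name__ == '__main__':
    monitor = Readnoise()
    monitor.run()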
Example #7
    def run(self):
        """The main method.  See module docstrings for further details."""

        logging.info('Begin logging for bias_monitor')

        # Get the output directory and setup a directory to store the data
        self.output_dir = os.path.join(get_config()['outputs'], 'bias_monitor')
        ensure_dir_exists(os.path.join(self.output_dir, 'data'))

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in ['nircam', 'niriss', 'nirspec']:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible full-frame apertures for this instrument
            siaf = Siaf(self.instrument)
            possible_apertures = [
                aperture for aperture in siaf.apertures
                if siaf[aperture].AperType == 'FULLSCA'
            ]

            for aperture in possible_apertures:

                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))
                self.aperture = aperture

                # Locate the record of most recent MAST search; use this time
                # (plus a 30 day buffer to catch any missing files from
                # previous run) as the start time in the new MAST search.
                most_recent_search = self.most_recent_search()
                self.query_start = most_recent_search - 30

                # Query MAST for new dark files for this instrument/aperture
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))
                new_entries = monitor_utils.mast_query_darks(
                    instrument, aperture, self.query_start, self.query_end)

                # Exclude ASIC tuning data
                len_new_darks = len(new_entries)
                new_entries = monitor_utils.exclude_asic_tuning(new_entries)
                len_no_asic = len(new_entries)
                num_asic = len_new_darks - len_no_asic
                logging.info(
                    "\tFiltering out ASIC tuning files removed {} dark files.".
                    format(num_asic))

                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Set up a directory to store the data for this aperture
                self.data_dir = os.path.join(
                    self.output_dir,
                    'data/{}_{}'.format(self.instrument.lower(),
                                        self.aperture.lower()))
                if len(new_entries) > 0:
                    ensure_dir_exists(self.data_dir)

                # Get any new files to process
                new_files = []
                for file_entry in new_entries:
                    output_filename = os.path.join(self.data_dir,
                                                   file_entry['filename'])
                    output_filename = output_filename.replace(
                        '_uncal.fits', '_uncal_0thgroup.fits').replace(
                            '_dark.fits', '_uncal_0thgroup.fits')

                    # Don't process files that already exist in the bias stats database
                    file_exists = self.file_exists_in_database(output_filename)
                    if file_exists:
                        logging.info(
                            '\t{} already exists in the bias database table.'.
                            format(output_filename))
                        continue

                    # Save the 0th group image from each new file in the output directory; some don't exist in the JWQL filesystem.
                    try:
                        filename = filesystem_path(file_entry['filename'])
                        uncal_filename = filename.replace('_dark', '_uncal')
                        if not os.path.isfile(uncal_filename):
                            logging.info(
                                '\t{} does not exist in JWQL filesystem, even though {} does'
                                .format(uncal_filename, filename))
                        else:
                            new_file = self.extract_zeroth_group(
                                uncal_filename)
                            new_files.append(new_file)
                    except FileNotFoundError:
                        logging.info(
                            '\t{} does not exist in JWQL filesystem'.format(
                                file_entry['filename']))

                # Run the bias monitor on any new files
                if len(new_files) > 0:
                    self.process(new_files)
                    monitor_run = True
                else:
                    logging.info(
                        '\tBias monitor skipped. {} new dark files for {}, {}.'
                        .format(len(new_files), instrument, aperture))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'entries_found': len(new_entries),
                    'files_found': len(new_files),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Bias Monitor completed successfully.')
Example #8
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for dark_monitor')

        apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL']

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = list(Siaf(instrument).apernames)
            possible_apertures = [
                ap for ap in possible_apertures if ap not in apertures_to_skip
            ]

            for aperture in possible_apertures:
                logging.info('')
                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))

                # Find the appropriate threshold for the number of new files needed
                match = aperture == limits['Aperture']
                file_count_threshold = limits['Threshold'][match]

                # Locate the record of the most recent MAST search
                self.aperture = aperture
                self.query_start = self.most_recent_search()
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))

                # Query MAST using the aperture and the time of the
                # most recent previous search as the starting time
                new_entries = mast_query_darks(instrument, aperture,
                                               self.query_start,
                                               self.query_end)

                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Check to see if there are enough new files to meet the
                # monitor's signal-to-noise requirements
                if len(new_entries) >= file_count_threshold:
                    logging.info(
                        '\tSufficient new dark files found for {}, {} to run the dark monitor.'
                        .format(self.instrument, self.aperture))

                    # Get full paths to the files
                    new_filenames = []
                    for file_entry in new_entries:
                        try:
                            new_filenames.append(
                                filesystem_path(file_entry['filename']))
                        except FileNotFoundError:
                            logging.warning(
                                '\t\tUnable to locate {} in filesystem. Not including in processing.'
                                .format(file_entry['filename']))

                    # Set up directories for the copied data
                    ensure_dir_exists(os.path.join(self.output_dir, 'data'))
                    self.data_dir = os.path.join(
                        self.output_dir,
                        'data/{}_{}'.format(self.instrument.lower(),
                                            self.aperture.lower()))
                    ensure_dir_exists(self.data_dir)

                    # Copy files from filesystem
                    dark_files, not_copied = copy_files(
                        new_filenames, self.data_dir)

                    logging.info('\tNew_filenames: {}'.format(new_filenames))
                    logging.info('\tData dir: {}'.format(self.data_dir))
                    logging.info(
                        '\tCopied to working dir: {}'.format(dark_files))
                    logging.info('\tNot copied: {}'.format(not_copied))

                    # Run the dark monitor
                    self.process(dark_files)
                    monitor_run = True

                else:
                    logging.info((
                        '\tDark monitor skipped. {} new dark files for {}, {}. {} new files are '
                        'required to run dark current monitor.').format(
                            len(new_entries), instrument, aperture,
                            file_count_threshold[0]))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'files_found': len(new_entries),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')
Example #9
def get_header_info(filename):
    """Return the header information for a given ``filename``.

    Parameters
    ----------
    filename : str
        The name of the file of interest, without the extension
        (e.g. ``'jw86600008001_02101_00007_guider2_uncal'``).

    Returns
    -------
    header_info : dict
        The FITS headers of the extensions in the given ``filename``.
    """

    # Initialize dictionary to store header information
    header_info = {}

    # Open the file
    fits_filepath = filesystem_path(filename, search='*_rate.fits')
    hdulist = fits.open(fits_filepath)

    # Extract header information from file
    for ext in range(0, len(hdulist)):

        # Initialize dictionary to store header information for particular extension
        header_info[ext] = {}

        # Get header
        header = hdulist[ext].header

        # Determine the extension name
        if ext == 0:
            header_info[ext]['EXTNAME'] = 'PRIMARY'
        else:
            header_info[ext]['EXTNAME'] = header['EXTNAME']

        # Get list of keywords and values
        exclude_list = ['', 'COMMENT']
        header_info[ext]['keywords'] = [
            item for item in list(header.keys()) if item not in exclude_list
        ]
        header_info[ext]['values'] = []
        for key in header_info[ext]['keywords']:
            header_info[ext]['values'].append(hdulist[ext].header[key])

    # Close the file
    hdulist.close()

    # Build tables
    for ext in header_info:
        data_dict = {}
        data_dict['Keyword'] = header_info[ext]['keywords']
        data_dict['Value'] = header_info[ext]['values']
        header_info[ext]['table'] = pd.DataFrame(data_dict)
        header_info[ext]['table_rows'] = header_info[ext]['table'].values
        header_info[ext]['table_columns'] = header_info[ext][
            'table'].columns.values

    return header_info
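
A hypothetical call using the filename format from the docstring; per the code above, each extension entry carries a pandas DataFrame of keyword/value pairs.

# Hypothetical usage; the filename follows the docstring's convention.
header_info = get_header_info('jw86600008001_02101_00007_guider2_uncal')
primary = header_info[0]['table']  # DataFrame of the PRIMARY header
print(primary.head())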
Example #10
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for dark_monitor')

        apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL']

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = list(Siaf(instrument).apernames)
            possible_apertures = [
                ap for ap in possible_apertures if ap not in apertures_to_skip
            ]

            # Get a list of all possible readout patterns for this instrument
            possible_readpatts = RAPID_READPATTERNS[instrument]

            for aperture in possible_apertures:
                logging.info('')
                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))

                # Find appropriate threshold for the number of new files needed
                match = aperture == limits['Aperture']

                # If the aperture is not listed in the threshold file, we need
                # a default
                if not np.any(match):
                    file_count_threshold = 30
                    logging.warning((
                        '\tAperture {} is not present in the threshold file. Continuing '
                        'with the default threshold of 30 files.'.format(
                            aperture)))
                else:
                    file_count_threshold = limits['Threshold'][match][0]
                self.aperture = aperture

                # We need a separate search for each readout pattern
                for readpatt in possible_readpatts:
                    self.readpatt = readpatt
                    logging.info('\tWorking on readout pattern: {}'.format(
                        self.readpatt))

                    # Locate the record of the most recent MAST search
                    self.query_start = self.most_recent_search()
                    logging.info('\tQuery times: {} {}'.format(
                        self.query_start, self.query_end))

                    # Query MAST using the aperture and the time of the
                    # most recent previous search as the starting time
                    new_entries = mast_query_darks(instrument,
                                                   aperture,
                                                   self.query_start,
                                                   self.query_end,
                                                   readpatt=self.readpatt)
                    logging.info(
                        '\tAperture: {}, Readpattern: {}, new entries: {}'.
                        format(self.aperture, self.readpatt, len(new_entries)))

                    # Check to see if there are enough new files to meet the
                    # monitor's signal-to-noise requirements
                    if len(new_entries) >= file_count_threshold:
                        logging.info(
                            '\tMAST query has returned sufficient new dark files for {}, {}, {} to run the dark monitor.'
                            .format(self.instrument, self.aperture,
                                    self.readpatt))

                        # Get full paths to the files
                        new_filenames = []
                        for file_entry in new_entries:
                            try:
                                new_filenames.append(
                                    filesystem_path(file_entry['filename']))
                            except FileNotFoundError:
                                logging.warning(
                                    '\t\tUnable to locate {} in filesystem. Not including in processing.'
                                    .format(file_entry['filename']))

                        # In some (unusual) cases, there are files in MAST with the correct aperture name
                        # but incorrect array sizes. Make sure that the new files all have the expected
                        # aperture size
                        temp_filenames = []
                        bad_size_filenames = []
                        expected_ap = Siaf(instrument)[aperture]
                        expected_xsize = expected_ap.XSciSize
                        expected_ysize = expected_ap.YSciSize
                        for new_file in new_filenames:
                            with fits.open(new_file) as hdulist:
                                xsize = hdulist[0].header['SUBSIZE1']
                                ysize = hdulist[0].header['SUBSIZE2']
                            if xsize == expected_xsize and ysize == expected_ysize:
                                temp_filenames.append(new_file)
                            else:
                                bad_size_filenames.append(new_file)
                        if len(temp_filenames) != len(new_filenames):
                            logging.info(
                                '\tSome files returned by MAST have unexpected aperture sizes. These files will be ignored: '
                            )
                            for badfile in bad_size_filenames:
                                logging.info('\t\t{}'.format(badfile))
                        new_filenames = deepcopy(temp_filenames)

                        # If it turns out that the monitor doesn't find enough
                        # of the files returned by the MAST query to meet the threshold,
                        # then the monitor will not be run
                        if len(new_filenames) < file_count_threshold:
                            logging.info((
                                "\tFilesystem search for the files identified by MAST has returned {} files. "
                                "This is less than the required minimum number of files ({}) necessary to run "
                                "the monitor. Quitting.").format(
                                    len(new_filenames), file_count_threshold))
                            monitor_run = False
                        else:
                            logging.info((
                                "\tFilesystem search for the files identified by MAST has returned {} files."
                            ).format(len(new_filenames)))
                            monitor_run = True

                        if monitor_run:
                            # Set up directories for the copied data
                            ensure_dir_exists(
                                os.path.join(self.output_dir, 'data'))
                            self.data_dir = os.path.join(
                                self.output_dir,
                                'data/{}_{}'.format(self.instrument.lower(),
                                                    self.aperture.lower()))
                            ensure_dir_exists(self.data_dir)

                            # Copy files from filesystem
                            dark_files, not_copied = copy_files(
                                new_filenames, self.data_dir)

                            logging.info(
                                '\tNew_filenames: {}'.format(new_filenames))
                            logging.info('\tData dir: {}'.format(
                                self.data_dir))
                            logging.info('\tCopied to working dir: {}'.format(
                                dark_files))
                            logging.info('\tNot copied: {}'.format(not_copied))

                            # Run the dark monitor
                            self.process(dark_files)

                    else:
                        logging.info((
                            '\tDark monitor skipped. MAST query has returned {} new dark files for '
                            '{}, {}, {}. {} new files are required to run dark current monitor.'
                        ).format(len(new_entries), instrument, aperture,
                                 self.readpatt, file_count_threshold))
                        monitor_run = False

                    # Update the query history
                    new_entry = {
                        'instrument': instrument,
                        'aperture': aperture,
                        'readpattern': self.readpatt,
                        'start_time_mjd': self.query_start,
                        'end_time_mjd': self.query_end,
                        'files_found': len(new_entries),
                        'run_monitor': monitor_run,
                        'entry_date': datetime.datetime.now()
                    }
                    self.query_table.__table__.insert().execute(new_entry)
                    logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')