Example #1
def test_directory(test_dir=TEST_DIRECTORY):
    """Create a test directory for preview image.

    Parameters
    ----------
    test_dir : str
        Path to directory used for testing

    Yields
    -------
    test_dir : str
        Path to directory used for testing

    """
    # Set up local test directory
    ensure_dir_exists(test_dir)
    yield test_dir

    # Tear down local test directory and any files within
    if os.path.isdir(test_dir):
        shutil.rmtree(test_dir)

    # Empty test directory on central storage
    jpgs = glob.glob(os.path.join(get_config()['test_dir'], '*.jpg'))
    thumbs = glob.glob(os.path.join(get_config()['test_dir'], '*.thumbs'))
    for file in jpgs + thumbs:
        os.remove(file)
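
The generator above follows the pytest set-up/yield/tear-down fixture protocol. A minimal usage sketch, assuming the generator is registered as a fixture (e.g. with ``@pytest.fixture()``) in the original test module; the test below is illustrative and not part of the source:

import os

def test_preview_file_is_written(test_directory):
    # pytest injects the yielded path; the fixture's teardown removes
    # the directory and any files in it after the test completes.
    preview_path = os.path.join(test_directory, 'example_preview.jpg')
    with open(preview_path, 'w') as f:
        f.write('placeholder')
    assert os.path.isfile(preview_path)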
Example #2
def make_log_file(module):
    """Create the log file name based on the module name.

    The name of the ``log_file`` is a combination of the name of the
    module being logged and the current datetime.

    Parameters
    ----------
    module : str
        The name of the module being logged.

    Returns
    -------
    log_file : str
        The full path to which the log file will be written.
    """

    # Build filename
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
    filename = '{0}_{1}.log'.format(module, timestamp)

    # Determine save location
    user = pwd.getpwuid(os.getuid()).pw_name
    admin_account = get_config()['admin_account']
    log_path = get_config()['log_dir']

    # For production
    if user == admin_account and socket.gethostname()[0] == 'p':
        log_file = os.path.join(log_path, 'prod', module, filename)

    # For test
    elif user == admin_account and socket.gethostname()[0] == 't':
        log_file = os.path.join(log_path, 'test', module, filename)

    # For dev
    elif user == admin_account and socket.gethostname()[0] == 'd':
        log_file = os.path.join(log_path, 'dev', module, filename)

    # For local (also write to dev)
    else:
        log_file = os.path.join(log_path, 'dev', module, filename)

    # Make sure parent directory exists
    ensure_dir_exists(os.path.dirname(log_file))

    return log_file
Example #3
    def save_mean_slope_image(self, slope_img, stdev_img, files):
        """Save the mean slope image and associated stdev image to a
        file

        Parameters
        ----------
        slope_img : numpy.ndarray
            2D array containing the mean slope image

        stdev_img : numpy.ndarray
            2D array containing the stdev image associated with the mean
            slope image.

        files : list
            List of input files used to construct the mean slope image

        Returns
        -------
        output_filename : str
            Name of the FITS file in which the mean and stdev images
            are saved
        """

        output_filename = '{}_{}_{}_to_{}_mean_slope_image.fits'.format(
            self.instrument.lower(), self.aperture.lower(), self.query_start,
            self.query_end)

        mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor',
                                      'mean_slope_images')
        ensure_dir_exists(mean_slope_dir)
        output_filename = os.path.join(mean_slope_dir, output_filename)
        logging.info("Name of mean slope image: {}".format(output_filename))

        primary_hdu = fits.PrimaryHDU()
        primary_hdu.header['INSTRUME'] = (self.instrument, 'JWST instrument')
        primary_hdu.header['APERTURE'] = (self.aperture, 'Aperture name')
        primary_hdu.header['QRY_STRT'] = (self.query_start,
                                          'MAST Query start time (MJD)')
        primary_hdu.header['QRY_END'] = (self.query_end,
                                         'MAST Query end time (MJD)')

        files_string = 'FILES USED: '
        for filename in files:
            files_string += '{}, '.format(filename)

        primary_hdu.header.add_history(files_string)
        mean_img_hdu = fits.ImageHDU(slope_img, name='MEAN')
        stdev_img_hdu = fits.ImageHDU(stdev_img, name='STDEV')
        hdu_list = fits.HDUList([primary_hdu, mean_img_hdu, stdev_img_hdu])
        hdu_list.writeto(output_filename, overwrite=True)
        set_permissions(output_filename)

        return output_filename
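
Because the writer above stores the arrays in named image extensions ('MEAN' and 'STDEV') and the query metadata in the primary header, the file can be read back directly. A short sketch, with 'mean_slope_image.fits' standing in for the generated filename:

from astropy.io import fits

with fits.open('mean_slope_image.fits') as hdu_list:
    instrument = hdu_list[0].header['INSTRUME']   # e.g. 'NIRCAM'
    query_start = hdu_list[0].header['QRY_STRT']  # MAST query start (MJD)
    mean_img = hdu_list['MEAN'].data    # 2D mean slope image
    stdev_img = hdu_list['STDEV'].data  # matching stdev image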
Example #4
def path_check():
    """Check that the ``CRDS_PATH`` environment variable is set. This
    will be the location to which CRDS reference files are downloaded.
    If the environment variable is not set, default to ``$HOME/crds_cache/``.

    Returns
    -------
    crds_path : str
        Full path to the location of the CRDS reference files
    """
    crds_path = os.environ.get('CRDS_PATH')
    if crds_path is None:
        reffile_dir = '{}/crds_cache'.format(os.environ.get('HOME'))
        os.environ["CRDS_PATH"] = reffile_dir
        ensure_dir_exists(reffile_dir)
        print('CRDS_PATH environment variable not set. Setting to {}'.format(reffile_dir))
        return reffile_dir
    else:
        return crds_path
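
A minimal usage sketch: after ``path_check()`` returns, ``CRDS_PATH`` is defined for the current process (and the default cache directory is created when the variable was previously unset), so subsequent CRDS reference-file downloads have a destination. Both branches leave the environment variable consistent with the returned value:

import os

crds_path = path_check()
assert os.environ['CRDS_PATH'] == crds_path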
Example #5
def make_log_file(module, production_mode=True, path='./'):
    """Create the log file name based on the module name.

    The name of the ``log_file`` is a combination of the name of the
    module being logged and the current datetime.

    Parameters
    ----------
    module : str
        The name of the module being logged.
    production_mode : bool
        Whether or not the output should be written to the production
        environment.
    path : str
        Directory in which to write the log file when ``production_mode``
        is ``False``; defaults to the current working directory.

    Returns
    -------
    log_file : str
        The full path to which the log file will be written.
    """

    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
    filename = '{0}_{1}.log'.format(module, timestamp)
    user = pwd.getpwuid(os.getuid()).pw_name

    settings = get_config()
    admin_account = settings['admin_account']
    log_path = settings['log_dir']

    exempt_modules = []
    if user != admin_account and module not in exempt_modules and production_mode:
        module = os.path.join('dev', module)

    if production_mode:
        log_file = os.path.join(log_path, module, filename)
    else:
        log_file = os.path.join(path, filename)

    ensure_dir_exists(os.path.dirname(log_file))

    return log_file
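
A short sketch of wiring the returned path into the standard-library logger. The ``logging.basicConfig`` call is illustrative and stands in for whatever logging configuration the surrounding package actually performs:

import logging

log_file = make_log_file('example_module', production_mode=False, path='/tmp')
logging.basicConfig(filename=log_file, level=logging.INFO,
                    format='%(asctime)s %(levelname)s: %(message)s')
logging.info('Writing log output to %s', log_file)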
Example #6
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for readnoise_monitor\n')

        # Get the output directory and set up a directory to store the data
        self.output_dir = os.path.join(get_config()['outputs'],
                                       'readnoise_monitor')
        ensure_dir_exists(os.path.join(self.output_dir, 'data'))

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in ['nircam', 'niriss']:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures for this instrument
            siaf = Siaf(self.instrument)
            possible_apertures = list(siaf.apertures)

            for aperture in possible_apertures:

                logging.info('\nWorking on aperture {} in {}'.format(
                    aperture, instrument))
                self.aperture = aperture

                # Locate the record of the most recent MAST search; use this time
                # (plus a 30 day buffer to catch any missing files from the previous
                # run) as the start time in the new MAST search.
                most_recent_search = self.most_recent_search()
                self.query_start = most_recent_search - 30

                # Query MAST for new dark files for this instrument/aperture
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))
                new_entries = mast_query_darks(instrument, aperture,
                                               self.query_start,
                                               self.query_end)
                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Set up a directory to store the data for this aperture
                self.data_dir = os.path.join(
                    self.output_dir,
                    'data/{}_{}'.format(self.instrument.lower(),
                                        self.aperture.lower()))
                if len(new_entries) > 0:
                    ensure_dir_exists(self.data_dir)

                # Get any new files to process
                new_files = []
                checked_files = []
                for file_entry in new_entries:
                    output_filename = os.path.join(
                        self.data_dir,
                        file_entry['filename'].replace('_dark', '_uncal'))

                    # Sometimes both the dark and uncal names of a file are picked up in new_entries
                    if output_filename in checked_files:
                        logging.info(
                            '\t{} already checked in this run.'.format(
                                output_filename))
                        continue
                    checked_files.append(output_filename)

                    # Don't process files that already exist in the readnoise stats database
                    file_exists = self.file_exists_in_database(output_filename)
                    if file_exists:
                        logging.info(
                            '\t{} already exists in the readnoise database table.'
                            .format(output_filename))
                        continue

                    # Save any new uncal files with enough groups in the output directory; some don't exist in the JWQL filesystem
                    try:
                        filename = filesystem_path(file_entry['filename'])
                        uncal_filename = filename.replace('_dark', '_uncal')
                        if not os.path.isfile(uncal_filename):
                            logging.info(
                                '\t{} does not exist in JWQL filesystem, even though {} does'
                                .format(uncal_filename, filename))
                        else:
                            num_groups = fits.getheader(
                                uncal_filename)['NGROUPS']
                            if num_groups > 1:  # skip processing if the file doesn't have enough groups to calculate the readnoise; TODO change to 10 before incorporating MIRI
                                shutil.copy(uncal_filename, self.data_dir)
                                logging.info('\tCopied {} to {}'.format(
                                    uncal_filename, output_filename))
                                set_permissions(output_filename)
                                new_files.append(output_filename)
                            else:
                                logging.info(
                                    '\tNot enough groups to calculate readnoise in {}'
                                    .format(uncal_filename))
                    except FileNotFoundError:
                        logging.info(
                            '\t{} does not exist in JWQL filesystem'.format(
                                file_entry['filename']))

                # Run the readnoise monitor on any new files
                if len(new_files) > 0:
                    self.process(new_files)
                    monitor_run = True
                else:
                    logging.info(
                        '\tReadnoise monitor skipped. {} new dark files for {}, {}.'
                        .format(len(new_files), instrument, aperture))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'entries_found': len(new_entries),
                    'files_found': len(new_files),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Readnoise Monitor completed successfully.')
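
The MAST query window used here (and in the other monitor ``run()`` methods below) is expressed in MJD: the end is the current time and the start is the most recent recorded search, minus a 30 day buffer in this monitor to catch files missed by the previous run. A tiny self-contained illustration of the arithmetic, assuming ``astropy`` is installed and using a stand-in value for the database record:

from astropy.time import Time

query_end = Time.now().mjd              # current time in MJD
most_recent_search = query_end - 45.0   # stand-in for the stored value
query_start = most_recent_search - 30   # 30 day overlap buffer

print('MAST query window: {:.2f} to {:.2f} (MJD)'.format(query_start, query_end))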
Example #7
    def run(self):
        """The main method.  See module docstrings for further details."""

        logging.info('Begin logging for bias_monitor')

        # Get the output directory and set up a directory to store the data
        self.output_dir = os.path.join(get_config()['outputs'], 'bias_monitor')
        ensure_dir_exists(os.path.join(self.output_dir, 'data'))

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in ['nircam', 'niriss', 'nirspec']:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible full-frame apertures for this instrument
            siaf = Siaf(self.instrument)
            possible_apertures = [
                aperture for aperture in siaf.apertures
                if siaf[aperture].AperType == 'FULLSCA'
            ]

            for aperture in possible_apertures:

                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))
                self.aperture = aperture

                # Locate the record of most recent MAST search; use this time
                # (plus a 30 day buffer to catch any missing files from
                # previous run) as the start time in the new MAST search.
                most_recent_search = self.most_recent_search()
                self.query_start = most_recent_search - 30

                # Query MAST for new dark files for this instrument/aperture
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))
                new_entries = monitor_utils.mast_query_darks(
                    instrument, aperture, self.query_start, self.query_end)

                # Exclude ASIC tuning data
                len_new_darks = len(new_entries)
                new_entries = monitor_utils.exclude_asic_tuning(new_entries)
                len_no_asic = len(new_entries)
                num_asic = len_new_darks - len_no_asic
                logging.info(
                    "\tFiltering out ASIC tuning files removed {} dark files.".
                    format(num_asic))

                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Set up a directory to store the data for this aperture
                self.data_dir = os.path.join(
                    self.output_dir,
                    'data/{}_{}'.format(self.instrument.lower(),
                                        self.aperture.lower()))
                if len(new_entries) > 0:
                    ensure_dir_exists(self.data_dir)

                # Get any new files to process
                new_files = []
                for file_entry in new_entries:
                    output_filename = os.path.join(self.data_dir,
                                                   file_entry['filename'])
                    output_filename = output_filename.replace(
                        '_uncal.fits', '_uncal_0thgroup.fits').replace(
                            '_dark.fits', '_uncal_0thgroup.fits')

                    # Don't process files that already exist in the bias stats database
                    file_exists = self.file_exists_in_database(output_filename)
                    if file_exists:
                        logging.info(
                            '\t{} already exists in the bias database table.'.
                            format(output_filename))
                        continue

                    # Save the 0th group image from each new file in the output directory; some don't exist in the JWQL filesystem.
                    try:
                        filename = filesystem_path(file_entry['filename'])
                        uncal_filename = filename.replace('_dark', '_uncal')
                        if not os.path.isfile(uncal_filename):
                            logging.info(
                                '\t{} does not exist in JWQL filesystem, even though {} does'
                                .format(uncal_filename, filename))
                        else:
                            new_file = self.extract_zeroth_group(
                                uncal_filename)
                            new_files.append(new_file)
                    except FileNotFoundError:
                        logging.info(
                            '\t{} does not exist in JWQL filesystem'.format(
                                file_entry['filename']))

                # Run the bias monitor on any new files
                if len(new_files) > 0:
                    self.process(new_files)
                    monitor_run = True
                else:
                    logging.info(
                        '\tBias monitor skipped. {} new dark files for {}, {}.'
                        .format(len(new_files), instrument, aperture))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'entries_found': len(new_entries),
                    'files_found': len(new_files),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Bias Monitor completed successfully.')
Example #8
def get_edb_components(request):
    """Return dictionary with content needed for the EDB page.

    Parameters
    ----------
    request : HttpRequest object
        Incoming request from the webpage

    Returns
    -------
    edb_components : dict
        Dictionary with the required components

    """
    mnemonic_name_search_result = {}
    mnemonic_query_result = {}
    mnemonic_query_result_plot = None
    mnemonic_exploration_result = None

    # If this is a POST request, we need to process the form data
    if request.method == 'POST':

        if 'mnemonic_name_search' in request.POST.keys():
            # authenticate with astroquery.mast if necessary
            logged_in = log_into_mast(request)

            mnemonic_name_search_form = MnemonicSearchForm(
                request.POST,
                logged_in=logged_in,
                prefix='mnemonic_name_search')

            if mnemonic_name_search_form.is_valid():
                mnemonic_identifier = mnemonic_name_search_form[
                    'search'].value()
                if mnemonic_identifier is not None:
                    mnemonic_name_search_result = get_mnemonic_info(
                        mnemonic_identifier)

            # create forms for search fields not clicked
            mnemonic_query_form = MnemonicQueryForm(prefix='mnemonic_query')
            mnemonic_exploration_form = MnemonicExplorationForm(
                prefix='mnemonic_exploration')

        elif 'mnemonic_query' in request.POST.keys():
            # authenticate with astroquery.mast if necessary
            logged_in = log_into_mast(request)

            mnemonic_query_form = MnemonicQueryForm(request.POST,
                                                    logged_in=logged_in,
                                                    prefix='mnemonic_query')

            # proceed only if entries make sense
            if mnemonic_query_form.is_valid():
                mnemonic_identifier = mnemonic_query_form['search'].value()
                start_time = Time(mnemonic_query_form['start_time'].value(),
                                  format='iso')
                end_time = Time(mnemonic_query_form['end_time'].value(),
                                format='iso')

                if mnemonic_identifier is not None:
                    mnemonic_query_result = get_mnemonic(
                        mnemonic_identifier, start_time, end_time)
                    mnemonic_query_result_plot = mnemonic_query_result.bokeh_plot()

                    # generate table download in web app
                    result_table = mnemonic_query_result.data

                    # save file locally to be available for download
                    static_dir = os.path.join(settings.BASE_DIR, 'static')
                    ensure_dir_exists(static_dir)
                    file_name_root = 'mnemonic_query_result_table'
                    file_for_download = '{}.csv'.format(file_name_root)
                    path_for_download = os.path.join(static_dir,
                                                     file_for_download)

                    # add metadata to the saved table
                    comments = []
                    comments.append(
                        'DMS EDB query of {}:'.format(mnemonic_identifier))
                    for key, value in mnemonic_query_result.info.items():
                        comments.append('{} = {}'.format(key, str(value)))
                    comments.append(' ')
                    comments.append('Start time {}'.format(start_time.isot))
                    comments.append('End time   {}'.format(end_time.isot))
                    comments.append('Number of rows {}'.format(
                        len(result_table)))
                    comments.append(' ')
                    result_table.meta['comments'] = comments
                    result_table.write(path_for_download,
                                       format='ascii.fixed_width',
                                       overwrite=True,
                                       delimiter=',',
                                       bookend=False)
                    mnemonic_query_result.file_for_download = file_for_download

            # create forms for search fields not clicked
            mnemonic_name_search_form = MnemonicSearchForm(
                prefix='mnemonic_name_search')
            mnemonic_exploration_form = MnemonicExplorationForm(
                prefix='mnemonic_exploration')

        elif 'mnemonic_exploration' in request.POST.keys():
            mnemonic_exploration_form = MnemonicExplorationForm(
                request.POST, prefix='mnemonic_exploration')
            if mnemonic_exploration_form.is_valid():
                mnemonic_exploration_result, meta = mnemonic_inventory()

                # loop over filled fields and implement simple AND logic
                for field in mnemonic_exploration_form.fields:
                    field_value = mnemonic_exploration_form[field].value()
                    if field_value != '':
                        column_name = mnemonic_exploration_form[field].label

                        # matching indices in table (case-insensitive)
                        index = [
                            i for i, item in enumerate(
                                mnemonic_exploration_result[column_name])
                            if re.search(field_value, item, re.IGNORECASE)
                        ]
                        mnemonic_exploration_result = mnemonic_exploration_result[
                            index]

                mnemonic_exploration_result.n_rows = len(
                    mnemonic_exploration_result)

                # generate tables for display and download in web app
                display_table = copy.deepcopy(mnemonic_exploration_result)

                # temporary html file,
                # see http://docs.astropy.org/en/stable/_modules/astropy/table/
                tmpdir = tempfile.mkdtemp()
                file_name_root = 'mnemonic_exploration_result_table'
                path_for_html = os.path.join(tmpdir,
                                             '{}.html'.format(file_name_root))
                with open(path_for_html, 'w') as tmp:
                    display_table.write(tmp, format='jsviewer')
                with open(path_for_html, 'r') as html_file:
                    mnemonic_exploration_result.html_file_content = html_file.read()

                # pass on meta data to have access to total number of mnemonics
                mnemonic_exploration_result.meta = meta

                # save file locally to be available for download
                static_dir = os.path.join(settings.BASE_DIR, 'static')
                ensure_dir_exists(static_dir)
                file_for_download = '{}.csv'.format(file_name_root)
                path_for_download = os.path.join(static_dir, file_for_download)
                display_table.write(path_for_download,
                                    format='ascii.fixed_width',
                                    overwrite=True,
                                    delimiter=',',
                                    bookend=False)
                mnemonic_exploration_result.file_for_download = file_for_download

                if mnemonic_exploration_result.n_rows == 0:
                    mnemonic_exploration_result = 'empty'

            # create forms for search fields not clicked
            mnemonic_name_search_form = MnemonicSearchForm(
                prefix='mnemonic_name_search')
            mnemonic_query_form = MnemonicQueryForm(prefix='mnemonic_query')

    else:
        mnemonic_name_search_form = MnemonicSearchForm(
            prefix='mnemonic_name_search')
        mnemonic_query_form = MnemonicQueryForm(prefix='mnemonic_query')
        mnemonic_exploration_form = MnemonicExplorationForm(
            prefix='mnemonic_exploration')

    edb_components = {
        'mnemonic_query_form': mnemonic_query_form,
        'mnemonic_query_result': mnemonic_query_result,
        'mnemonic_query_result_plot': mnemonic_query_result_plot,
        'mnemonic_name_search_form': mnemonic_name_search_form,
        'mnemonic_name_search_result': mnemonic_name_search_result,
        'mnemonic_exploration_form': mnemonic_exploration_form,
        'mnemonic_exploration_result': mnemonic_exploration_result
    }

    return edb_components
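
``get_edb_components`` is written to be called from a Django view, with its return value passed into a template context. A hypothetical sketch of such a view; the view name and template path below are placeholders, not taken from the original code:

from django.shortcuts import render

def engineering_database(request):
    """Render the EDB page using the components assembled above."""
    context = {'edb_components': get_edb_components(request)}
    return render(request, 'engineering_database.html', context)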
Example #9
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for dark_monitor')

        apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL']

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = list(Siaf(instrument).apernames)
            possible_apertures = [
                ap for ap in possible_apertures if ap not in apertures_to_skip
            ]

            for aperture in possible_apertures:
                logging.info('')
                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))

                # Find the appropriate threshold for the number of new files needed
                match = aperture == limits['Aperture']
                file_count_threshold = limits['Threshold'][match]

                # Locate the record of the most recent MAST search
                self.aperture = aperture
                self.query_start = self.most_recent_search()
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))

                # Query MAST using the aperture and the time of the
                # most recent previous search as the starting time
                new_entries = mast_query_darks(instrument, aperture,
                                               self.query_start,
                                               self.query_end)

                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Check to see if there are enough new files to meet the
                # monitor's signal-to-noise requirements
                if len(new_entries) >= file_count_threshold:
                    logging.info(
                        '\tSufficient new dark files found for {}, {} to run the dark monitor.'
                        .format(self.instrument, self.aperture))

                    # Get full paths to the files
                    new_filenames = []
                    for file_entry in new_entries:
                        try:
                            new_filenames.append(
                                filesystem_path(file_entry['filename']))
                        except FileNotFoundError:
                            logging.warning(
                                '\t\tUnable to locate {} in filesystem. Not including in processing.'
                                .format(file_entry['filename']))

                    # Set up directories for the copied data
                    ensure_dir_exists(os.path.join(self.output_dir, 'data'))
                    self.data_dir = os.path.join(
                        self.output_dir,
                        'data/{}_{}'.format(self.instrument.lower(),
                                            self.aperture.lower()))
                    ensure_dir_exists(self.data_dir)

                    # Copy files from filesystem
                    dark_files, not_copied = copy_files(
                        new_filenames, self.data_dir)

                    logging.info('\tNew_filenames: {}'.format(new_filenames))
                    logging.info('\tData dir: {}'.format(self.data_dir))
                    logging.info(
                        '\tCopied to working dir: {}'.format(dark_files))
                    logging.info('\tNot copied: {}'.format(not_copied))

                    # Run the dark monitor
                    self.process(dark_files)
                    monitor_run = True

                else:
                    logging.info((
                        '\tDark monitor skipped. {} new dark files for {}, {}. {} new files are '
                        'required to run dark current monitor.').format(
                            len(new_entries), instrument, aperture,
                            file_count_threshold[0]))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'files_found': len(new_entries),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')
Example #10
    def run(self):
        """The main method.  See module docstrings for further details.

        There are 2 parts to the bad pixel monitor:
        1. Bad pixels from illuminated data
        2. Bad pixels from dark data

        For each, we will query MAST, copy new files from the filesystem
        and pass the list of copied files into the ``process()`` method.
        """
        logging.info('Begin logging for bad_pixel_monitor')

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'bad_pixel_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = self.get_possible_apertures()

            for aperture in possible_apertures:
                grating = None
                detector_name = None
                lamp = None

                # NIRSpec flats use the MIRROR grating.
                if self.instrument == 'nirspec':
                    grating = 'MIRROR'

                # MIRI is unlike the other instruments. We basically treat
                # the detector as the aperture name because there is no
                # aperture name for a full frame MRS exposure.
                if self.instrument == 'miri':
                    detector_name, aperture_name = aperture
                    self.aperture = detector_name
                else:
                    self.aperture = aperture
                    aperture_name = aperture

                # In flight, NIRISS plans to take darks using the LINE2 lamp
                if self.instrument == 'niriss':
                    lamp = 'LINE2'

                # What lamp is most appropriate for NIRSpec?
                if self.instrument == 'nirspec':
                    lamp = 'LINE2'

                # What lamp is most appropriate for FGS?
                #if self.instrument == 'fgs':
                #    lamp = 'G2LAMP1'

                logging.info('')
                logging.info('Working on aperture {} in {}'.format(aperture, self.instrument))

                # Find the appropriate threshold for the number of new files needed
                match = self.aperture == limits['Aperture']
                flat_file_count_threshold = limits['FlatThreshold'][match].data[0]
                dark_file_count_threshold = limits['DarkThreshold'][match].data[0]

                # Locate the record of the most recent MAST search
                self.flat_query_start = self.most_recent_search(file_type='flat')
                self.dark_query_start = self.most_recent_search(file_type='dark')
                logging.info('\tFlat field query times: {} {}'.format(self.flat_query_start, self.query_end))
                logging.info('\tDark current query times: {} {}'.format(self.dark_query_start, self.query_end))

                # Query MAST using the aperture and the time of the most
                # recent previous search as the starting time.
                flat_templates = FLAT_EXP_TYPES[instrument]
                dark_templates = DARK_EXP_TYPES[instrument]

                new_flat_entries = mast_query(instrument, flat_templates, self.flat_query_start, self.query_end,
                                              aperture=aperture_name, grating=grating, detector=detector_name,
                                              lamp=lamp)
                new_dark_entries = mast_query(instrument, dark_templates, self.dark_query_start, self.query_end,
                                              aperture=aperture_name, detector=detector_name)

                # Filter the results
                # Filtering could be different for flats vs darks.
                # Kevin says we shouldn't need to worry about mixing lamps in the data used to create the bad pixel
                # mask. In flight, data will only be taken with LINE2, LEVEL 5. Currently in MAST all lamps are
                # present, but Kevin is not concerned about variations in flat field structure.

                # NIRISS - results can include rate, rateints, trapsfilled
                # MIRI - Jane says they now use illuminated data for dead pixel checks, just like other insts.
                # NIRSpec - can be cal, x1d, rate, rateints. Can have both cal and x1d so filter repeats
                # FGS - rate, rateints, trapsfilled
                # NIRCam - no int flats

                # The query results can contain multiple entries for files
                # in different calibration states (or for different output
                # products), so we need to filter the list for duplicate
                # entries and for the calibration state we are interested
                # in before we know how many new entries there really are.

                # In the end, we need rate files as well as uncal files
                # because we're going to need to create jump files.
                # In order to use a given file we must have at least the
                # uncal version of the file. Get the uncal and rate file
                # lists to align.

                if new_flat_entries:
                    new_flat_entries = self.filter_query_results(new_flat_entries, datatype='flat')
                    flat_uncal_files = locate_uncal_files(new_flat_entries)
                    flat_uncal_files, run_flats = check_for_sufficient_files(flat_uncal_files, instrument, aperture, flat_file_count_threshold, 'flats')
                    flat_rate_files, flat_rate_files_to_copy = locate_rate_files(flat_uncal_files)
                else:
                    run_flats = False
                    flat_uncal_files, flat_rate_files, flat_rate_files_to_copy = None, None, None

                if new_dark_entries:
                    new_dark_entries = self.filter_query_results(new_dark_entries, datatype='dark')
                    dark_uncal_files = locate_uncal_files(new_dark_entries)
                    dark_uncal_files, run_darks = check_for_sufficient_files(dark_uncal_files, instrument, aperture, dark_file_count_threshold, 'darks')
                    dark_rate_files, dark_rate_files_to_copy = locate_rate_files(dark_uncal_files)
                else:
                    run_darks = False
                    dark_uncal_files, dark_rate_files, dark_rate_files_to_copy = None, None, None

                # Set up directories for the copied data
                ensure_dir_exists(os.path.join(self.output_dir, 'data'))
                self.data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower()))
                ensure_dir_exists(self.data_dir)

                # Copy files from filesystem
                if run_flats:
                    flat_uncal_files, flat_rate_files = self.map_uncal_and_rate_file_lists(flat_uncal_files, flat_rate_files, flat_rate_files_to_copy, 'flat')
                if run_darks:
                    dark_uncal_files, dark_rate_files = self.map_uncal_and_rate_file_lists(dark_uncal_files, dark_rate_files, dark_rate_files_to_copy, 'dark')

                # Run the bad pixel monitor
                if run_flats or run_darks:
                    self.process(flat_uncal_files, flat_rate_files, dark_uncal_files, dark_rate_files)

                # Update the query history
                if dark_uncal_files is None:
                    num_dark_files = 0
                else:
                    num_dark_files = len(dark_uncal_files)

                if flat_uncal_files is None:
                    num_flat_files = 0
                else:
                    num_flat_files = len(flat_uncal_files)

                new_entry = {'instrument': self.instrument.upper(),
                             'aperture': self.aperture,
                             'dark_start_time_mjd': self.dark_query_start,
                             'dark_end_time_mjd': self.query_end,
                             'flat_start_time_mjd': self.flat_query_start,
                             'flat_end_time_mjd': self.query_end,
                             'dark_files_found': num_dark_files,
                             'flat_files_found': num_flat_files,
                             'run_bpix_from_darks': run_darks,
                             'run_bpix_from_flats': run_flats,
                             'run_monitor': run_flats or run_darks,
                             'entry_date': datetime.datetime.now()}
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Bad Pixel Monitor completed successfully.')
Example #11
def create_table(status_dict):
    """Create interactive ``bokeh`` table containing the logfile status
    results.

    Parameters
    ----------
    status_dict : dict
        Nested dictionary with status results from all logfiles
    """
    # Rearrange the nested dictionary into a non-nested dict for the table
    filenames = []
    dates = []
    missings = []
    results = []
    for key in status_dict:
        filenames.append(status_dict[key]['logname'])
        dates.append(datetime.fromtimestamp(status_dict[key]['latest_time']))
        missings.append(str(status_dict[key]['missing_file']))
        results.append(status_dict[key]['status'])

    # div to color the boxes in the status column
    success_template = """
    <div style="background:<%=
        (function colorfromstr(){
            if(value == "success"){
                return("green")}
            else{return("red")}
            }()) %>;
        color: white">
    <%= value %></div>
    """

    # div to color the boxes in the column for possibly late logfiles
    missing_template = """
    <div style="background:<%=
        (function colorfrombool(){
            if(value == "True"){
                return("orange")}
            else{return("green")}
            }()) %>;
        color: white">
    <%= value %></div>
    """
    success_formatter = HTMLTemplateFormatter(template=success_template)
    missing_formatter = HTMLTemplateFormatter(template=missing_template)

    data = dict(name=list(status_dict.keys()),
                filename=filenames,
                date=dates,
                missing=missings,
                result=results)
    source = ColumnDataSource(data)

    datefmt = DateFormatter(format="RFC-2822")
    columns = [
        TableColumn(field="name", title="Monitor Name", width=200),
        TableColumn(field="filename", title="Most Recent File", width=350),
        TableColumn(field="date",
                    title="Most Recent Time",
                    width=200,
                    formatter=datefmt),
        TableColumn(field="missing",
                    title="Possible Missing File",
                    width=200,
                    formatter=missing_formatter),
        TableColumn(field="result",
                    title="Status",
                    width=100,
                    formatter=success_formatter),
    ]
    data_table = DataTable(source=source,
                           columns=columns,
                           width=800,
                           height=280,
                           index_position=None)

    # Get output directory for saving the table files
    output_dir = SETTINGS['outputs']
    output_filename = 'cron_status_table'

    # verify/create output sub-directory
    output_dir = os.path.join(output_dir, 'monitor_cron_jobs')
    ensure_dir_exists(output_dir)

    # Save full html
    html_outfile = os.path.join(output_dir, '{}.html'.format(output_filename))
    output_file(html_outfile)
    save(data_table)
    try:
        set_permissions(html_outfile)
    except PermissionError:
        logging.warning(
            'Unable to set permissions for {}'.format(html_outfile))
    logging.info('Saved Bokeh full HTML file: {}'.format(html_outfile))
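
The ``status_dict`` consumed above is a nested mapping keyed by monitor name, where each value supplies ``logname``, ``latest_time`` (a Unix timestamp), ``missing_file``, and ``status``. An illustrative input, inferred from the field accesses in the function:

status_dict = {
    'dark_monitor': {
        'logname': 'dark_monitor_2021-01-01-00-00.log',
        'latest_time': 1609459200.0,   # Unix timestamp of the newest log
        'missing_file': False,         # stringified before display
        'status': 'success',           # 'success' colors the cell green
    },
}
create_table(status_dict)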
Example #12
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for dark_monitor')

        apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL']

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = list(Siaf(instrument).apernames)
            possible_apertures = [
                ap for ap in possible_apertures if ap not in apertures_to_skip
            ]

            # Get a list of all possible readout patterns associated with the aperture
            possible_readpatts = RAPID_READPATTERNS[instrument]

            for aperture in possible_apertures:
                logging.info('')
                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))

                # Find appropriate threshold for the number of new files needed
                match = aperture == limits['Aperture']

                # If the aperture is not listed in the threshold file, we need
                # a default
                if not np.any(match):
                    file_count_threshold = 30
                    logging.warning((
                        '\tAperture {} is not present in the threshold file. Continuing '
                        'with the default threshold of 30 files.'.format(
                            aperture)))
                else:
                    file_count_threshold = limits['Threshold'][match][0]
                self.aperture = aperture

                # We need a separate search for each readout pattern
                for readpatt in possible_readpatts:
                    self.readpatt = readpatt
                    logging.info('\tWorking on readout pattern: {}'.format(
                        self.readpatt))

                    # Locate the record of the most recent MAST search
                    self.query_start = self.most_recent_search()
                    logging.info('\tQuery times: {} {}'.format(
                        self.query_start, self.query_end))

                    # Query MAST using the aperture and the time of the
                    # most recent previous search as the starting time
                    new_entries = mast_query_darks(instrument,
                                                   aperture,
                                                   self.query_start,
                                                   self.query_end,
                                                   readpatt=self.readpatt)
                    logging.info(
                        '\tAperture: {}, Readpattern: {}, new entries: {}'.
                        format(self.aperture, self.readpatt, len(new_entries)))

                    # Check to see if there are enough new files to meet the
                    # monitor's signal-to-noise requirements
                    if len(new_entries) >= file_count_threshold:
                        logging.info(
                            '\tMAST query has returned sufficient new dark files for {}, {}, {} to run the dark monitor.'
                            .format(self.instrument, self.aperture,
                                    self.readpatt))

                        # Get full paths to the files
                        new_filenames = []
                        for file_entry in new_entries:
                            try:
                                new_filenames.append(
                                    filesystem_path(file_entry['filename']))
                            except FileNotFoundError:
                                logging.warning(
                                    '\t\tUnable to locate {} in filesystem. Not including in processing.'
                                    .format(file_entry['filename']))

                        # In some (unusual) cases, there are files in MAST with the correct aperture name
                        # but incorrect array sizes. Make sure that the new files all have the expected
                        # aperture size
                        temp_filenames = []
                        bad_size_filenames = []
                        expected_ap = Siaf(instrument)[aperture]
                        expected_xsize = expected_ap.XSciSize
                        expected_ysize = expected_ap.YSciSize
                        for new_file in new_filenames:
                            with fits.open(new_file) as hdulist:
                                xsize = hdulist[0].header['SUBSIZE1']
                                ysize = hdulist[0].header['SUBSIZE2']
                            if xsize == expected_xsize and ysize == expected_ysize:
                                temp_filenames.append(new_file)
                            else:
                                bad_size_filenames.append(new_file)
                        if len(temp_filenames) != len(new_filenames):
                            logging.info(
                                '\tSome files returned by MAST have unexpected aperture sizes. These files will be ignored: '
                            )
                            for badfile in bad_size_filenames:
                                logging.info('\t\t{}'.format(badfile))
                        new_filenames = deepcopy(temp_filenames)

                        # If it turns out that the monitor doesn't find enough
                        # of the files returned by the MAST query to meet the threshold,
                        # then the monitor will not be run
                        if len(new_filenames) < file_count_threshold:
                            logging.info((
                                "\tFilesystem search for the files identified by MAST has returned {} files. "
                                "This is less than the required minimum number of files ({}) necessary to run "
                                "the monitor. Quitting.").format(
                                    len(new_filenames), file_count_threshold))
                            monitor_run = False
                        else:
                            logging.info((
                                "\tFilesystem search for the files identified by MAST has returned {} files."
                            ).format(len(new_filenames)))
                            monitor_run = True

                        if monitor_run:
                            # Set up directories for the copied data
                            ensure_dir_exists(
                                os.path.join(self.output_dir, 'data'))
                            self.data_dir = os.path.join(
                                self.output_dir,
                                'data/{}_{}'.format(self.instrument.lower(),
                                                    self.aperture.lower()))
                            ensure_dir_exists(self.data_dir)

                            # Copy files from filesystem
                            dark_files, not_copied = copy_files(
                                new_filenames, self.data_dir)

                            logging.info(
                                '\tNew_filenames: {}'.format(new_filenames))
                            logging.info('\tData dir: {}'.format(
                                self.data_dir))
                            logging.info('\tCopied to working dir: {}'.format(
                                dark_files))
                            logging.info('\tNot copied: {}'.format(not_copied))

                            # Run the dark monitor
                            self.process(dark_files)

                    else:
                        logging.info((
                            '\tDark monitor skipped. MAST query has returned {} new dark files for '
                            '{}, {}, {}. {} new files are required to run dark current monitor.'
                        ).format(len(new_entries), instrument, aperture,
                                 self.readpatt, file_count_threshold))
                        monitor_run = False

                    # Update the query history
                    new_entry = {
                        'instrument': instrument,
                        'aperture': aperture,
                        'readpattern': self.readpatt,
                        'start_time_mjd': self.query_start,
                        'end_time_mjd': self.query_end,
                        'files_found': len(new_entries),
                        'run_monitor': monitor_run,
                        'entry_date': datetime.datetime.now()
                    }
                    self.query_table.__table__.insert().execute(new_entry)
                    logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')