Example #1
def jwqldb_table_viewer(request, tablename_param=None):
    """Generate the JWQL Table Viewer view.

    Parameters
    ----------
    request : HttpRequest object
        Incoming request from the webpage

    tablename_param : str
        Table name parameter from URL

    Returns
    -------
    HttpResponse object
        Outgoing response sent to the webpage
    """

    if tablename_param is None:
        table_meta, tablename = get_jwqldb_table_view_components(request)
    else:
        table_meta = build_table(tablename_param)
        tablename = tablename_param

    _, _, engine, _ = load_connection(get_config()['connection_string'])
    all_jwql_tables = engine.table_names()

    if 'django_migrations' in all_jwql_tables:
        all_jwql_tables.remove(
            'django_migrations')  # Contains no information we need.

    jwql_tables_by_instrument = {}
    instruments = ['nircam', 'nirspec', 'niriss', 'miri', 'fgs']

    #  Sort tables by instrument
    for instrument in instruments:
        jwql_tables_by_instrument[instrument] = [
            tablename for tablename in all_jwql_tables
            if instrument in tablename
        ]

    # Don't forget tables that don't contain instrument-specific information.
    jwql_tables_by_instrument['general'] = [
        table for table in all_jwql_tables
        if not any(instrument in table for instrument in instruments)
    ]

    template = 'jwqldb_table_viewer.html'

    # If value of table_meta is None (when coming from home page)
    if table_meta is None:
        context = {'inst': '', 'all_jwql_tables': jwql_tables_by_instrument}
    # If table_meta is empty, just render table with no data.
    elif table_meta.empty:
        context = {
            'inst': '',
            'all_jwql_tables': jwql_tables_by_instrument,
            'table_columns': table_meta.columns.values,
            'table_name': tablename
        }
    # Else, everything is good to go, render the table.
    else:
        context = {
            'inst': '',
            'all_jwql_tables': jwql_tables_by_instrument,
            'table_columns': table_meta.columns.values,
            'table_rows': table_meta.values,
            'table_name': tablename
        }

    return render(request, template, context)
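A hedged sketch of how a Django view with an optional URL parameter like this is typically routed; the module path and URL names below are assumptions, not taken from the source.

# Hypothetical urls.py entries (view import path and names assumed)
from django.urls import path

from . import views

urlpatterns = [
    # No table name in the URL: the view pulls it from the request
    path('jwqldb/', views.jwqldb_table_viewer, name='jwqldb_table_viewer'),
    # Table name captured from the URL and passed in as tablename_param
    path('jwqldb/<str:tablename_param>/', views.jwqldb_table_viewer,
         name='jwqldb_table_viewer_tablename'),
]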
Example #2
def create_table(status_dict):
    """Create interactive ``bokeh`` table containing the logfile status
    results.

    Parameters
    ----------
    status_dict : dict
        Nested dictionary with status results from all logfiles
    """
    # Rearrange the nested dictionary into a non-nested dict for the table
    filenames = []
    dates = []
    missings = []
    results = []
    for key in status_dict:
        filenames.append(status_dict[key]['logname'])
        dates.append(datetime.fromtimestamp(status_dict[key]['latest_time']))
        missings.append(str(status_dict[key]['missing_file']))
        results.append(status_dict[key]['status'])

    # div to color the boxes in the status column
    success_template = """
    <div style="background:<%=
        (function colorfromstr(){
            if(value == "success"){
                return("green")}
            else{return("red")}
            }()) %>;
        color: white">
    <%= value %></div>
    """

    # div to color the boxes in the column for possibly late logfiles
    missing_template = """
    <div style="background:<%=
        (function colorfrombool(){
            if(value == "True"){
                return("orange")}
            else{return("green")}
            }()) %>;
        color: white">
    <%= value %></div>
    """
    success_formatter = HTMLTemplateFormatter(template=success_template)
    missing_formatter = HTMLTemplateFormatter(template=missing_template)

    data = dict(name=list(status_dict.keys()), filename=filenames, date=dates, missing=missings,
                result=results)
    source = ColumnDataSource(data)

    datefmt = DateFormatter(format="RFC-2822")
    columns = [
        TableColumn(field="name", title="Monitor Name", width=200),
        TableColumn(field="filename", title="Most Recent File", width=350),
        TableColumn(field="date", title="Most Recent Time", width=200, formatter=datefmt),
        TableColumn(field="missing", title="Possible Missing File", width=200, formatter=missing_formatter),
        TableColumn(field="result", title="Status", width=100, formatter=success_formatter),
    ]
    data_table = DataTable(source=source, columns=columns, width=800, height=280, index_position=None)

    # Get output directory for saving the table files
    output_dir = get_config()['outputs']
    output_filename = 'cron_status_table'

    # Save full html
    html_outfile = os.path.join(output_dir, 'monitor_cron_jobs', '{}.html'.format(output_filename))
    output_file(html_outfile)
    save(data_table)
    try:
        set_permissions(html_outfile)
    except PermissionError:
        logging.warning('Unable to set permissions for {}'.format(html_outfile))
    logging.info('Saved Bokeh full HTML file: {}'.format(html_outfile))
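For reference, a minimal sketch of the nested status_dict layout that create_table() iterates over; the monitor names and values here are hypothetical.

# Hypothetical input illustrating the expected nesting
status_dict = {
    'dark_monitor': {
        'logname': 'dark_monitor_2021-01-01.log',  # "Most Recent File" column
        'latest_time': 1609459200.0,               # POSIX timestamp, becomes "Most Recent Time"
        'missing_file': False,                     # stringified into "Possible Missing File"
        'status': 'success',                       # renders green in the "Status" column
    },
    'readnoise_monitor': {
        'logname': 'readnoise_monitor_2021-01-01.log',
        'latest_time': 1609545600.0,
        'missing_file': True,                      # "True" renders orange
        'status': 'failure',                       # anything but "success" renders red
    },
}
create_table(status_dict)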
Example #3
from .data_containers import get_edb_components
from .data_containers import get_filenames_by_instrument
from .data_containers import get_header_info
from .data_containers import get_image_info
from .data_containers import get_proposal_info
from .data_containers import get_thumbnails_all_instruments
from .data_containers import nirspec_trending
from .data_containers import random_404_page
from .data_containers import get_jwqldb_table_view_components
from .data_containers import thumbnails_ajax
from .data_containers import thumbnails_query_ajax
from .forms import InstrumentAnomalySubmitForm
from .forms import AnomalyQueryForm
from .forms import FileSearchForm

FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem')


def anomaly_query(request):
    """The anomaly query form page"""

    form = AnomalyQueryForm(request.POST or None)

    if request.method == 'POST':
        if form.is_valid():
            query_configs = {}
            for instrument in ['miri', 'nirspec', 'niriss', 'nircam', 'fgs']:
                query_configs[instrument] = {}
                query_configs[instrument]['filters'] = [
                    query_unformat(i)
                    for i in form.cleaned_data['{}_filt'.format(instrument)]
Example #4
def get_dashboard_components():
    """Build and return dictionaries containing components and html
    needed for the dashboard.

    Returns
    -------
    dashboard_components : dict
        A dictionary containing components needed for the dashboard.
    dashboard_html : dict
        A dictionary containing full HTML needed for the dashboard.
    """

    output_dir = get_config()['outputs']
    name_dict = {
        '': '',
        'monitor_mast': 'Database Monitor',
        'monitor_filesystem': 'Filesystem Monitor'
    }

    # Run the cron job monitor to produce an updated table
    monitor_cron_jobs.status(production_mode=True)

    # Build dictionary of Bokeh components from files in the output directory
    dashboard_components = {}
    for dir_name, _, file_list in os.walk(output_dir):
        monitor_name = os.path.basename(dir_name)

        # Only continue if the dashboard knows how to build that monitor
        if monitor_name in name_dict.keys():
            formatted_monitor_name = name_dict[monitor_name]
            dashboard_components[formatted_monitor_name] = {}
            for fname in file_list:
                if 'component' in fname:
                    full_fname = '{}/{}'.format(monitor_name, fname)
                    plot_name = fname.split('_component')[0]

                    # Generate formatted plot name
                    formatted_plot_name = plot_name.title().replace('_', ' ')
                    for lowercase, mixed_case in JWST_INSTRUMENT_NAMES_MIXEDCASE.items():
                        formatted_plot_name = formatted_plot_name.replace(
                            lowercase.capitalize(), mixed_case)
                    formatted_plot_name = formatted_plot_name.replace(
                        'Jwst', 'JWST')
                    formatted_plot_name = formatted_plot_name.replace(
                        'Caom', 'CAOM')

                    # Get the div
                    html_file = full_fname.split('.')[0] + '.html'
                    with open(os.path.join(output_dir, html_file), 'r') as f:
                        div = f.read()

                    # Get the script
                    js_file = full_fname.split('.')[0] + '.js'
                    with open(os.path.join(output_dir, js_file), 'r') as f:
                        script = f.read()

                    # Save to dictionary
                    dashboard_components[formatted_monitor_name][
                        formatted_plot_name] = [div, script]

    # Add HTML that cannot be saved as components to the dictionary
    with open(
            os.path.join(output_dir, 'monitor_cron_jobs',
                         'cron_status_table.html'), 'r') as f:
        cron_status_table_html = f.read()
    dashboard_html = {}
    dashboard_html['Cron Job Monitor'] = cron_status_table_html

    return dashboard_components, dashboard_html
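A small, hypothetical consumer of the two return values, just to show their shapes (monitor name -> plot name -> [div, script], and monitor name -> full HTML):

# Hedged usage sketch
dashboard_components, dashboard_html = get_dashboard_components()
for monitor_name, plots in dashboard_components.items():
    for plot_name, (div, script) in plots.items():
        print('{} / {}: div of {} chars, script of {} chars'.format(
            monitor_name, plot_name, len(div), len(script)))
for name, html in dashboard_html.items():
    print('{}: {} chars of full HTML'.format(name, len(html)))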
Example #5
    def run(self):
        """The main method.  See module docstrings for further details.

        There are 2 parts to the bad pixel monitor:
        1. Bad pixels from illuminated data
        2. Bad pixels from dark data

        For each, we will query MAST, copy new files from the filesystem
        and pass the list of copied files into the ``process()`` method.
        """
        logging.info('Begin logging for bad_pixel_monitor')

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'],
                                       'bad_pixel_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = self.get_possible_apertures()

            for aperture in possible_apertures:
                grating = None
                detector_name = None
                lamp = None

                # NIRSpec flats use the MIRROR grating.
                if self.instrument == 'nirspec':
                    grating = 'MIRROR'

                # MIRI is unlike the other instruments. We basically treat
                # the detector as the aperture name because there is no
                # aperture name for a full frame MRS exposure.
                if self.instrument == 'miri':
                    detector_name, aperture_name = aperture
                    self.aperture = detector_name
                else:
                    self.aperture = aperture
                    aperture_name = aperture

                # In flight, NIRISS plans to take darks using the LINE2 lamp
                if self.instrument == 'niriss':
                    lamp = 'LINE2'

                # What lamp is most appropriate for NIRSpec?
                if self.instrument == 'nirspec':
                    lamp = 'LINE2'

                # What lamp is most appropriate for FGS?
                # if self.instrument == 'fgs':
                #    lamp = 'G2LAMP1'

                logging.info('')
                logging.info('Working on aperture {} in {}'.format(
                    aperture, self.instrument))

                # Find the appropriate threshold for number of new files needed
                match = self.aperture == limits['Aperture']
                flat_file_count_threshold = limits['FlatThreshold'][
                    match].data[0]
                dark_file_count_threshold = limits['DarkThreshold'][
                    match].data[0]

                # Locate the record of the most recent MAST search
                self.flat_query_start = self.most_recent_search(
                    file_type='flat')
                self.dark_query_start = self.most_recent_search(
                    file_type='dark')
                logging.info('\tFlat field query times: {} {}'.format(
                    self.flat_query_start, self.query_end))
                logging.info('\tDark current query times: {} {}'.format(
                    self.dark_query_start, self.query_end))

                # Query MAST using the aperture and the time of the most
                # recent previous search as the starting time.
                flat_templates = FLAT_EXP_TYPES[instrument]
                dark_templates = DARK_EXP_TYPES[instrument]

                new_flat_entries = mast_query(instrument,
                                              flat_templates,
                                              self.flat_query_start,
                                              self.query_end,
                                              aperture=aperture_name,
                                              grating=grating,
                                              detector=detector_name,
                                              lamp=lamp)
                new_dark_entries = mast_query(instrument,
                                              dark_templates,
                                              self.dark_query_start,
                                              self.query_end,
                                              aperture=aperture_name,
                                              detector=detector_name)

                # Filter the results
                # Filtering could be different for flats vs darks.
                # Kevin says we shouldn't need to worry about mixing lamps in
                # the data used to create the bad pixel mask.
                # In flight, data will only be taken with LINE2, LEVEL 5.
                # Currently in MAST all lamps are present, but Kevin is
                # not concerned about variations in flat field structure.

                # NIRISS - results can include rate, rateints, trapsfilled
                # MIRI - Jane says they now use illuminated data for dead pixel checks, just like the other instruments.
                # NIRSpec - can be cal, x1d, rate, rateints. Can have both cal and x1d so filter repeats
                # FGS - rate, rateints, trapsfilled
                # NIRCam - no int flats

                # The query results can contain multiple entries for files
                # in different calibration states (or for different output
                # products), so we need to filter the list for duplicate
                # entries and for the calibration state we are interested
                # in before we know how many new entries there really are.

                # In the end, we need rate files as well as uncal files
                # because we're going to need to create jump files.
                # In order to use a given file we must have at least the
                # uncal version of the file. Get the uncal and rate file
                # lists to align.

                if new_flat_entries:
                    # Exclude ASIC tuning data
                    len_new_flats = len(new_flat_entries)
                    new_flat_entries = monitor_utils.exclude_asic_tuning(
                        new_flat_entries)
                    len_no_asic = len(new_flat_entries)
                    num_asic = len_new_flats - len_no_asic
                    logging.info(
                        "\tFiltering out ASIC tuning files removed {} flat files."
                        .format(num_asic))

                    new_flat_entries = self.filter_query_results(
                        new_flat_entries, datatype='flat')
                    apcheck_flat_entries = pipeline_tools.aperture_size_check(
                        new_flat_entries, instrument, aperture)
                    lost_to_bad_metadata = len(new_flat_entries) - len(
                        apcheck_flat_entries)
                    logging.info(
                        '\t{} flat field files ignored due to inconsistency in array size and metadata.'
                        .format(lost_to_bad_metadata))
                    flat_uncal_files = locate_uncal_files(apcheck_flat_entries)
                    flat_uncal_files, run_flats = check_for_sufficient_files(
                        flat_uncal_files, instrument, aperture,
                        flat_file_count_threshold, 'flats')
                    flat_rate_files, flat_rate_files_to_copy = locate_rate_files(
                        flat_uncal_files)
                else:
                    run_flats = False
                    flat_uncal_files, flat_rate_files, flat_rate_files_to_copy = None, None, None

                if new_dark_entries:
                    # Exclude ASIC tuning data
                    len_new_darks = len(new_dark_entries)
                    new_dark_entries = monitor_utils.exclude_asic_tuning(
                        new_dark_entries)
                    len_no_asic = len(new_dark_entries)
                    num_asic = len_new_darks - len_no_asic
                    logging.info(
                        "\tFiltering out ASIC tuning files removed {} dark files."
                        .format(num_asic))

                    new_dark_entries = self.filter_query_results(
                        new_dark_entries, datatype='dark')
                    apcheck_dark_entries = pipeline_tools.aperture_size_check(
                        new_dark_entries, instrument, aperture)
                    lost_to_bad_metadata = len(new_dark_entries) - len(
                        apcheck_dark_entries)
                    logging.info(
                        '\t{} dark files ignored due to inconsistency in array size and metadata.'
                        .format(lost_to_bad_metadata))
                    dark_uncal_files = locate_uncal_files(apcheck_dark_entries)
                    dark_uncal_files, run_darks = check_for_sufficient_files(
                        dark_uncal_files, instrument, aperture,
                        dark_file_count_threshold, 'darks')
                    dark_rate_files, dark_rate_files_to_copy = locate_rate_files(
                        dark_uncal_files)
                else:
                    run_darks = False
                    dark_uncal_files, dark_rate_files, dark_rate_files_to_copy = None, None, None

                # Set up directories for the copied data
                ensure_dir_exists(os.path.join(self.output_dir, 'data'))
                self.data_dir = os.path.join(
                    self.output_dir,
                    'data/{}_{}'.format(self.instrument.lower(),
                                        self.aperture.lower()))
                ensure_dir_exists(self.data_dir)

                # Copy files from filesystem
                if run_flats:
                    flat_uncal_files, flat_rate_files = self.map_uncal_and_rate_file_lists(
                        flat_uncal_files, flat_rate_files,
                        flat_rate_files_to_copy, 'flat')
                if run_darks:
                    dark_uncal_files, dark_rate_files = self.map_uncal_and_rate_file_lists(
                        dark_uncal_files, dark_rate_files,
                        dark_rate_files_to_copy, 'dark')

                # Run the bad pixel monitor
                if run_flats or run_darks:
                    self.process(flat_uncal_files, flat_rate_files,
                                 dark_uncal_files, dark_rate_files)

                # Update the query history
                if dark_uncal_files is None:
                    num_dark_files = 0
                else:
                    num_dark_files = len(dark_uncal_files)

                if flat_uncal_files is None:
                    num_flat_files = 0
                else:
                    num_flat_files = len(flat_uncal_files)

                new_entry = {
                    'instrument': self.instrument.upper(),
                    'aperture': self.aperture,
                    'dark_start_time_mjd': self.dark_query_start,
                    'dark_end_time_mjd': self.query_end,
                    'flat_start_time_mjd': self.flat_query_start,
                    'flat_end_time_mjd': self.query_end,
                    'dark_files_found': num_dark_files,
                    'flat_files_found': num_flat_files,
                    'run_bpix_from_darks': run_darks,
                    'run_bpix_from_flats': run_flats,
                    'run_monitor': run_flats or run_darks,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Bad Pixel Monitor completed successfully.')
Example #6
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for dark_monitor')

        apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL']

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = list(Siaf(instrument).apernames)
            possible_apertures = [ap for ap in possible_apertures if ap not in apertures_to_skip]

            for aperture in possible_apertures:
                logging.info('')
                logging.info('Working on aperture {} in {}'.format(aperture, instrument))

                # Find the appropriate threshold for the number of new files needed
                match = aperture == limits['Aperture']
                file_count_threshold = limits['Threshold'][match]

                # Locate the record of the most recent MAST search
                self.aperture = aperture
                self.query_start = self.most_recent_search()
                logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end))

                # Query MAST using the aperture and the time of the
                # most recent previous search as the starting time
                new_entries = mast_query_darks(instrument, aperture, self.query_start, self.query_end)

                logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries)))

                # Check to see if there are enough new files to meet the
                # monitor's signal-to-noise requirements
                if len(new_entries) >= file_count_threshold:
                    logging.info('\tSufficient new dark files found for {}, {} to run the dark monitor.'
                                 .format(self.instrument, self.aperture))

                    # Get full paths to the files
                    new_filenames = []
                    for file_entry in new_entries:
                        try:
                            new_filenames.append(filesystem_path(file_entry['filename']))
                        except FileNotFoundError:
                            logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.'
                                            .format(file_entry['filename']))

                    # Set up directories for the copied data
                    ensure_dir_exists(os.path.join(self.output_dir, 'data'))
                    self.data_dir = os.path.join(self.output_dir,
                                                 'data/{}_{}'.format(self.instrument.lower(),
                                                                     self.aperture.lower()))
                    ensure_dir_exists(self.data_dir)

                    # Copy files from filesystem
                    dark_files, not_copied = copy_files(new_filenames, self.data_dir)

                    logging.info('\tNew_filenames: {}'.format(new_filenames))
                    logging.info('\tData dir: {}'.format(self.data_dir))
                    logging.info('\tCopied to working dir: {}'.format(dark_files))
                    logging.info('\tNot copied: {}'.format(not_copied))

                    # Run the dark monitor
                    self.process(dark_files)
                    monitor_run = True

                else:
                    logging.info(('\tDark monitor skipped. {} new dark files for {}, {}. {} new files are '
                                  'required to run dark current monitor.').format(
                        len(new_entries), instrument, aperture, file_count_threshold[0]))
                    monitor_run = False

                # Update the query history
                new_entry = {'instrument': instrument,
                             'aperture': aperture,
                             'start_time_mjd': self.query_start,
                             'end_time_mjd': self.query_end,
                             'files_found': len(new_entries),
                             'run_monitor': monitor_run,
                             'entry_date': datetime.datetime.now()}
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')
Beispiel #7
0
def test_get_config():
    """Assert that the ``get_config`` function successfully creates a
    dictionary.
    """
    settings = get_config()
    assert isinstance(settings, dict)
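Most of these examples rely on keys from that configuration dictionary. A hedged summary of the keys used above; the values are placeholders, and a real config.json may contain more:

# Placeholder values only; not real paths or credentials
example_config = {
    'jwql_dir': '/path/to/jwql',                       # Example #3
    'outputs': '/path/to/outputs',                     # Examples #2, #4, #5, ...
    'filesystem': '/path/to/filesystem',               # Examples #12, #15, #16, #17
    'preview_image_filesystem': '/path/to/previews',   # Example #17
    'thumbnail_filesystem': '/path/to/thumbnails',     # Example #17
    'connection_string': 'postgresql+psycopg2://user:password@host:port/database',  # Examples #1, #18
    'jwql_version': '0.0.0',                           # Example #14
}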
Beispiel #8
import os
import glob
import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn
import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.sql_interface as sql
import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.csv_to_AstropyTable as apt
from jwql.utils.utils import get_config, filename_parser

from astropy.table import Table, Column

from jwql.instrument_monitors.nirspec_monitors.data_trending.utils.process_data import once_a_day_routine

__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))

# Point to the directory where your files are located.
directory = os.path.join(get_config()['outputs'], 'nirspec_data_trending',
                         'nirspec_new_15min', '*.CSV')

# Here, some files contain the same data but are all incomplete;
# in order to generate a full database we have to import all of them.
filenames = glob.glob(directory)


def process_file(conn, path):
    '''Parse a CSV file, process the data within, and write it to the database.

    Parameters
    ----------
    conn : DBobject
        Connection object to temporary database
    path : str
        Defines path to the files
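The excerpt ends before the driver loop; a hypothetical sketch of how filenames and process_file() likely fit together (the sql_interface calls are assumptions, not confirmed API):

# conn = sql.create_connection(DATABASE_FILE)  # assumed helper in sql_interface
# for path in filenames:
#     process_file(conn, path)
# sql.close_connection(conn)                   # assumed helper in sql_interface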
Example #9
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for readnoise_monitor\n')

        # Get the output directory and setup a directory to store the data
        self.output_dir = os.path.join(get_config()['outputs'],
                                       'readnoise_monitor')
        ensure_dir_exists(os.path.join(self.output_dir, 'data'))

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures for this instrument
            siaf = Siaf(self.instrument)
            possible_apertures = list(siaf.apertures)

            for aperture in possible_apertures:

                logging.info('\nWorking on aperture {} in {}'.format(
                    aperture, instrument))
                self.aperture = aperture

                # Locate the record of the most recent MAST search; use this time
                # (plus a buffer to catch any missing files from the previous
                # run) as the start time in the new MAST search.
                most_recent_search = self.most_recent_search()
                self.query_start = most_recent_search - 70

                # Query MAST for new dark files for this instrument/aperture
                logging.info('\tQuery times: {} {}'.format(
                    self.query_start, self.query_end))
                new_entries = monitor_utils.mast_query_darks(
                    instrument, aperture, self.query_start, self.query_end)

                # Exclude ASIC tuning data
                len_new_darks = len(new_entries)
                new_entries = monitor_utils.exclude_asic_tuning(new_entries)
                len_no_asic = len(new_entries)
                num_asic = len_new_darks - len_no_asic
                logging.info(
                    "\tFiltering out ASIC tuning files removed {} dark files.".
                    format(num_asic))

                logging.info('\tAperture: {}, new entries: {}'.format(
                    self.aperture, len(new_entries)))

                # Set up a directory to store the data for this aperture
                self.data_dir = os.path.join(
                    self.output_dir,
                    'data/{}_{}'.format(self.instrument.lower(),
                                        self.aperture.lower()))
                if len(new_entries) > 0:
                    ensure_dir_exists(self.data_dir)

                # Get any new files to process
                new_files = []
                checked_files = []
                for file_entry in new_entries:
                    output_filename = os.path.join(
                        self.data_dir,
                        file_entry['filename'].replace('_dark', '_uncal'))

                    # Sometimes both the dark and uncal names of a file are picked up in new_entries
                    if output_filename in checked_files:
                        logging.info(
                            '\t{} already checked in this run.'.format(
                                output_filename))
                        continue
                    checked_files.append(output_filename)

                    # Don't process files that already exist in the readnoise stats database
                    file_exists = self.file_exists_in_database(output_filename)
                    if file_exists:
                        logging.info(
                            '\t{} already exists in the readnoise database table.'
                            .format(output_filename))
                        continue

                    # Save any new uncal files with enough groups in the output directory; some don't exist in the JWQL filesystem
                    try:
                        filename = filesystem_path(file_entry['filename'])
                        uncal_filename = filename.replace('_dark', '_uncal')
                        if not os.path.isfile(uncal_filename):
                            logging.info(
                                '\t{} does not exist in JWQL filesystem, even though {} does'
                                .format(uncal_filename, filename))
                        else:
                            num_groups = fits.getheader(
                                uncal_filename)['NGROUPS']
                            num_ints = fits.getheader(uncal_filename)['NINTS']
                            if instrument == 'miri':
                                total_cds_frames = int(
                                    (num_groups - 6) / 2) * num_ints
                            else:
                                total_cds_frames = int(
                                    num_groups / 2) * num_ints
                            # Skip processing if the file doesn't have enough groups/ints to calculate the readnoise.
                            # MIRI needs extra frames since the first five groups and the last group are omitted before calculating the readnoise.
                            if total_cds_frames >= 10:
                                shutil.copy(uncal_filename, self.data_dir)
                                logging.info('\tCopied {} to {}'.format(
                                    uncal_filename, output_filename))
                                set_permissions(output_filename)
                                new_files.append(output_filename)
                            else:
                                logging.info(
                                    '\tNot enough groups/ints to calculate readnoise in {}'
                                    .format(uncal_filename))
                    except FileNotFoundError:
                        logging.info(
                            '\t{} does not exist in JWQL filesystem'.format(
                                file_entry['filename']))

                # Run the readnoise monitor on any new files
                if len(new_files) > 0:
                    self.process(new_files)
                    monitor_run = True
                else:
                    logging.info(
                        '\tReadnoise monitor skipped. {} new dark files for {}, {}.'
                        .format(len(new_files), instrument, aperture))
                    monitor_run = False

                # Update the query history
                new_entry = {
                    'instrument': instrument,
                    'aperture': aperture,
                    'start_time_mjd': self.query_start,
                    'end_time_mjd': self.query_end,
                    'entries_found': len(new_entries),
                    'files_found': len(new_files),
                    'run_monitor': monitor_run,
                    'entry_date': datetime.datetime.now()
                }
                self.query_table.__table__.insert().execute(new_entry)
                logging.info('\tUpdated the query history table')

        logging.info('Readnoise Monitor completed successfully.')
Example #10
import os

from bokeh.models.tickers import LogTicker
from datetime import datetime
import numpy as np

from jwql.database.database_interface import session
from jwql.database.database_interface import NIRCamDarkQueryHistory, NIRCamDarkPixelStats, NIRCamDarkDarkCurrent
from jwql.database.database_interface import NIRISSDarkQueryHistory, NIRISSDarkPixelStats, NIRISSDarkDarkCurrent
from jwql.database.database_interface import MIRIDarkQueryHistory, MIRIDarkPixelStats, MIRIDarkDarkCurrent
from jwql.database.database_interface import NIRSpecDarkQueryHistory, NIRSpecDarkPixelStats, NIRSpecDarkDarkCurrent
from jwql.database.database_interface import FGSDarkQueryHistory, FGSDarkPixelStats, FGSDarkDarkCurrent
from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
from jwql.utils.utils import get_config
from jwql.bokeh_templating import BokehTemplate

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
OUTPUTS_DIR = get_config()['outputs']


class DarkMonitor(BokehTemplate):

    # Combine instrument and aperture into a single property because we
    # do not want to invoke the setter unless both are updated
    @property
    def aperture_info(self):
        return (self._instrument, self._aperture)

    @aperture_info.setter
    def aperture_info(self, info):
        self._instrument, self._aperture = info
        self.pre_init()
        self.post_init()
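The combined setter exists so that the (presumably expensive) pre_init()/post_init() hooks run once per update rather than once per attribute. A hypothetical illustration, with constructor details omitted:

# monitor = DarkMonitor()                           # constructor args unknown
# monitor.aperture_info = ('nircam', 'NRCA1_FULL')  # one assignment, one
#                                                   # pre_init/post_init cycle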
Example #11
    def run(self):
        """The main method.  See module docstrings for further
        details.
        """

        logging.info('Begin logging for dark_monitor')

        apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL']

        # Get the output directory
        self.output_dir = os.path.join(get_config()['outputs'], 'dark_monitor')

        # Read in config file that defines the thresholds for the number
        # of dark files that must be present in order for the monitor to run
        limits = ascii.read(THRESHOLDS_FILE)

        # Use the current time as the end time for MAST query
        self.query_end = Time.now().mjd

        # Loop over all instruments
        for instrument in JWST_INSTRUMENT_NAMES:
            self.instrument = instrument

            # Identify which database tables to use
            self.identify_tables()

            # Get a list of all possible apertures from pysiaf
            possible_apertures = list(Siaf(instrument).apernames)
            possible_apertures = [
                ap for ap in possible_apertures if ap not in apertures_to_skip
            ]

            # Get a list of all possible readout patterns associated with this instrument
            possible_readpatts = RAPID_READPATTERNS[instrument]

            for aperture in possible_apertures:
                logging.info('')
                logging.info('Working on aperture {} in {}'.format(
                    aperture, instrument))

                # Find appropriate threshold for the number of new files needed
                match = aperture == limits['Aperture']

                # If the aperture is not listed in the threshold file, we need
                # a default
                if not np.any(match):
                    file_count_threshold = 30
                    logging.warning((
                        '\tAperture {} is not present in the threshold file. Continuing '
                        'with the default threshold of 30 files.'.format(
                            aperture)))
                else:
                    file_count_threshold = limits['Threshold'][match][0]
                self.aperture = aperture

                # We need a separate search for each readout pattern
                for readpatt in possible_readpatts:
                    self.readpatt = readpatt
                    logging.info('\tWorking on readout pattern: {}'.format(
                        self.readpatt))

                    # Locate the record of the most recent MAST search
                    self.query_start = self.most_recent_search()
                    logging.info('\tQuery times: {} {}'.format(
                        self.query_start, self.query_end))

                    # Query MAST using the aperture and the time of the
                    # most recent previous search as the starting time
                    new_entries = mast_query_darks(instrument,
                                                   aperture,
                                                   self.query_start,
                                                   self.query_end,
                                                   readpatt=self.readpatt)
                    logging.info(
                        '\tAperture: {}, Readpattern: {}, new entries: {}'.
                        format(self.aperture, self.readpatt, len(new_entries)))

                    # Check to see if there are enough new files to meet the
                    # monitor's signal-to-noise requirements
                    if len(new_entries) >= file_count_threshold:
                        logging.info(
                            '\tMAST query has returned sufficient new dark files for {}, {}, {} to run the dark monitor.'
                            .format(self.instrument, self.aperture,
                                    self.readpatt))

                        # Get full paths to the files
                        new_filenames = []
                        for file_entry in new_entries:
                            try:
                                new_filenames.append(
                                    filesystem_path(file_entry['filename']))
                            except FileNotFoundError:
                                logging.warning(
                                    '\t\tUnable to locate {} in filesystem. Not including in processing.'
                                    .format(file_entry['filename']))

                        # In some (unusual) cases, there are files in MAST with the correct aperture name
                        # but incorrect array sizes. Make sure that the new files all have the expected
                        # aperture size
                        temp_filenames = []
                        bad_size_filenames = []
                        expected_ap = Siaf(instrument)[aperture]
                        expected_xsize = expected_ap.XSciSize
                        expected_ysize = expected_ap.YSciSize
                        for new_file in new_filenames:
                            with fits.open(new_file) as hdulist:
                                xsize = hdulist[0].header['SUBSIZE1']
                                ysize = hdulist[0].header['SUBSIZE2']
                            if xsize == expected_xsize and ysize == expected_ysize:
                                temp_filenames.append(new_file)
                            else:
                                bad_size_filenames.append(new_file)
                        if len(temp_filenames) != len(new_filenames):
                            logging.info(
                                '\tSome files returned by MAST have unexpected aperture sizes. These files will be ignored: '
                            )
                            for badfile in bad_size_filenames:
                                logging.info('\t\t{}'.format(badfile))
                        new_filenames = deepcopy(temp_filenames)

                        # If it turns out that the monitor doesn't find enough
                        # of the files returned by the MAST query to meet the threshold,
                        # then the monitor will not be run
                        if len(new_filenames) < file_count_threshold:
                            logging.info((
                                "\tFilesystem search for the files identified by MAST has returned {} files. "
                                "This is less than the required minimum number of files ({}) necessary to run "
                                "the monitor. Quitting.").format(
                                    len(new_filenames), file_count_threshold))
                            monitor_run = False
                        else:
                            logging.info((
                                "\tFilesystem search for the files identified by MAST has returned {} files."
                            ).format(len(new_filenames)))
                            monitor_run = True

                        if monitor_run:
                            # Set up directories for the copied data
                            ensure_dir_exists(
                                os.path.join(self.output_dir, 'data'))
                            self.data_dir = os.path.join(
                                self.output_dir,
                                'data/{}_{}'.format(self.instrument.lower(),
                                                    self.aperture.lower()))
                            ensure_dir_exists(self.data_dir)

                            # Copy files from filesystem
                            dark_files, not_copied = copy_files(
                                new_filenames, self.data_dir)

                            logging.info(
                                '\tNew_filenames: {}'.format(new_filenames))
                            logging.info('\tData dir: {}'.format(
                                self.data_dir))
                            logging.info('\tCopied to working dir: {}'.format(
                                dark_files))
                            logging.info('\tNot copied: {}'.format(not_copied))

                            # Run the dark monitor
                            self.process(dark_files)

                    else:
                        logging.info((
                            '\tDark monitor skipped. MAST query has returned {} new dark files for '
                            '{}, {}, {}. {} new files are required to run dark current monitor.'
                        ).format(len(new_entries), instrument, aperture,
                                 self.readpatt, file_count_threshold))
                        monitor_run = False

                    # Update the query history
                    new_entry = {
                        'instrument': instrument,
                        'aperture': aperture,
                        'readpattern': self.readpatt,
                        'start_time_mjd': self.query_start,
                        'end_time_mjd': self.query_end,
                        'files_found': len(new_entries),
                        'run_monitor': monitor_run,
                        'entry_date': datetime.datetime.now()
                    }
                    self.query_table.__table__.insert().execute(new_entry)
                    logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')
Example #12
    def clean_search(self):
        """Validate the "search" field.

        Check that the input is either a proposal or fileroot, and one
        that matches files in the filesystem.

        Returns
        -------
        str
            The cleaned data input into the "search" field
        """

        # Get the cleaned search data
        search = self.cleaned_data['search']

        # Make sure the search is either a proposal or fileroot
        if search.isnumeric() and 1 < int(search) < 99999:
            self.search_type = 'proposal'
        elif self._search_is_fileroot(search):
            self.search_type = 'fileroot'
        else:
            raise forms.ValidationError('Invalid search term {}. Please provide proposal number '
                                        'or file root.'.format(search))

        # If they searched for a proposal...
        if self.search_type == 'proposal':
            # See if there are any matching proposals and, if so, what
            # instrument they are for
            proposal_string = '{:05d}'.format(int(search))
            search_string_public = os.path.join(get_config()['filesystem'], 'public', 'jw{}'.format(proposal_string), '*', '*{}*.fits'.format(proposal_string))
            search_string_proprietary = os.path.join(get_config()['filesystem'], 'proprietary', 'jw{}'.format(proposal_string), '*', '*{}*.fits'.format(proposal_string))
            all_files = glob.glob(search_string_public)
            all_files.extend(glob.glob(search_string_proprietary))

            # Ignore "original" files
            all_files = [filename for filename in all_files if 'original' not in filename]

            if len(all_files) > 0:
                all_instruments = []
                for file in all_files:
                    instrument = filename_parser(file)['instrument']
                    all_instruments.append(instrument)
                if len(set(all_instruments)) > 1:
                    raise forms.ValidationError('Cannot return result for proposal with multiple instruments ({}).'.format(', '.join(set(all_instruments))))

                self.instrument = all_instruments[0]
            else:
                raise forms.ValidationError('Proposal {} not in the filesystem.'.format(search))

        # If they searched for a fileroot...
        elif self.search_type == 'fileroot':
            # See if there are any matching fileroots and, if so, what instrument they are for
            search_string_public = os.path.join(get_config()['filesystem'], 'public', search[:7], search[:13], '{}*.fits'.format(search))
            search_string_proprietary = os.path.join(get_config()['filesystem'], 'proprietary', search[:7], search[:13], '{}*.fits'.format(search))
            all_files = glob.glob(search_string_public)
            all_files.extend(glob.glob(search_string_proprietary))

            # Ignore "original" files
            all_files = [filename for filename in all_files if 'original' not in filename]

            if len(all_files) == 0:
                raise forms.ValidationError('Fileroot {} not in the filesystem.'.format(search))

            instrument = search.split('_')[-1][:3]
            self.instrument = JWST_INSTRUMENT_NAMES_SHORTHAND[instrument]

        return self.cleaned_data['search']
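For illustration, the two kinds of search terms this validator accepts and the globs they produce (values hypothetical):

# proposal search: '304' is zero-padded to '00304' and globbed as
#     <filesystem>/{public,proprietary}/jw00304/*/*00304*.fits
# fileroot search: 'jw00304002001_02102_00001_nis' is globbed as
#     <filesystem>/{public,proprietary}/jw00304/jw00304002001/jw00304002001_02102_00001_nis*.fits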
Example #13
def get_dashboard_components():
    """Build and return dictionaries containing components and html
    needed for the dashboard.

    Returns
    -------
    dashboard_components : dict
        A dictionary containing components needed for the dashboard.
    dashboard_html : dict
        A dictionary containing full HTML needed for the dashboard.
    """

    output_dir = get_config()['outputs']
    name_dict = {
        '': '',
        'monitor_mast': 'Database Monitor',
        'database_monitor_jwst': 'JWST',
        'database_monitor_caom': 'JWST (CAOM)',
        'monitor_filesystem': 'Filesystem Monitor',
        'filecount_type': 'Total File Counts by Type',
        'size_type': 'Total File Sizes by Type',
        'filecount': 'Total File Counts',
        'system_stats': 'System Statistics'
    }

    # Exclude monitors that can't be saved as components
    exclude_list = ['monitor_cron_jobs']

    # Run the cron job monitor to produce an updated table
    monitor_cron_jobs.status(production_mode=True)

    # Build dictionary of components
    dashboard_components = {}
    for dir_name, subdir_list, file_list in os.walk(output_dir):
        monitor_name = os.path.basename(dir_name)
        if monitor_name in name_dict and monitor_name not in exclude_list:
            dashboard_components[name_dict[monitor_name]] = {}
            for fname in file_list:
                if 'component' in fname:
                    full_fname = '{}/{}'.format(monitor_name, fname)
                    plot_name = fname.split('_component')[0]

                    # Get the div
                    html_file = full_fname.split('.')[0] + '.html'
                    with open(os.path.join(output_dir, html_file)) as f:
                        div = f.read()

                    # Get the script
                    js_file = full_fname.split('.')[0] + '.js'
                    with open(os.path.join(output_dir, js_file)) as f:
                        script = f.read()
                    dashboard_components[name_dict[monitor_name]][
                        name_dict[plot_name]] = [div, script]

    # Add HTML that cannot be saved as components to the dictionary
    with open(
            os.path.join(output_dir, 'monitor_cron_jobs',
                         'cron_status_table.html')) as f:
        cron_status_table_html = f.read()
    dashboard_html = {}
    dashboard_html['Cron Job Monitor'] = cron_status_table_html

    return dashboard_components, dashboard_html
Example #14
import os
import pkg_resources
from jwql.utils import utils

module_path = pkg_resources.resource_filename('jwql', '')
setup_path = os.path.normpath(os.path.join(module_path, '../setup.py'))

try:
    with open(setup_path) as f:
        data = f.readlines()

    for line in data:
        if 'VERSION =' in line:
            __version__ = line.split(' ')[-1].replace("'", "").strip()

    config_version = utils.get_config()['jwql_version']
    if __version__ != config_version:
        print(
            "Warning: config file JWQL version is {}, while JWQL is using {}".
            format(config_version, __version__))

except FileNotFoundError:
    print('Could not determine jwql version')
    __version__ = '0.0.0'
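The parsing above expects a line in setup.py of the following form (version value hypothetical):

# VERSION = '0.24.0'
# line.split(' ')[-1]  -> "'0.24.0'\n"
# .replace("'", "")    -> "0.24.0\n"
# .strip()             -> "0.24.0"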
Example #15
from bokeh.embed import components
from bokeh.layouts import gridplot
from bokeh.palettes import Category20_20 as palette
from bokeh.plotting import figure, output_file, save

from jwql.database.database_interface import engine
from jwql.database.database_interface import session
from jwql.database.database_interface import FilesystemGeneral
from jwql.database.database_interface import FilesystemInstrument
from jwql.utils.logging_functions import configure_logging, log_info, log_fail
from jwql.utils.permissions import set_permissions
from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE
from jwql.utils.utils import filename_parser
from jwql.utils.utils import get_config

FILESYSTEM = get_config()['filesystem']


def gather_statistics(general_results_dict, instrument_results_dict):
    """Walks the filesytem to gather various statistics to eventually
    store in the database

    Parameters
    ----------
    general_results_dict : dict
        A dictionary for the ``filesystem_general`` database table
    instrument_results_dict : dict
        A dictionary for the ``filesystem_instrument`` database table

    Returns
    -------
Example #16
def monitor_template_main():
    """ The main function of the ``monitor_template`` module."""

    # Example of logging
    my_variable = 'foo'
    logging.info('Some useful information: {}'.format(my_variable))

    # Example of querying for a dataset via MAST API
    service = "Mast.Jwst.Filtered.Niriss"
    params = {
        "columns": "filename",
        "filters": [{
            "paramName": "filter",
            "values": ['F430M']
        }]
    }
    response = Mast.service_request_async(service, params)
    result = response[0].json()['data']
    filename_of_interest = result[0]['filename']  # jw00304002001_02102_00001_nis_uncal.fits

    # Example of parsing a filename
    filename_dict = filename_parser(filename_of_interest)
    # Contents of filename_dict:
    #     {'program_id': '00304',
    #      'observation': '002',
    #      'visit': '001',
    #      'visit_group': '02',
    #      'parallel_seq_id': '1',
    #      'activity': '02',
    #      'exposure_id': '00001',
    #      'detector': 'nis',
    #      'suffix': 'uncal'}

    # Example of locating a dataset in the filesystem
    filesystem = get_config()['filesystem']
    dataset = os.path.join(filesystem,
                           'jw{}'.format(filename_dict['program_id']),
                           filename_of_interest)

    # Example of reading in dataset using jwst.datamodels
    im = datamodels.open(dataset)
    # Now have access to:
    #     im.data  # Data array
    #     im.err  # ERR array
    #     im.meta  # Metadata such as header keywords

    # Example of saving a file and setting permissions
    im.save('some_filename.fits')
    set_permissions('some_filename.fits')

    # Example of creating and exporting a Bokeh plot
    plt = Donut(im.data, plot_width=600, plot_height=600)
    plt.sizing_mode = 'stretch_both'  # Necessary for responsive sizing on web app
    script, div = components(plt)

    plot_output_dir = get_config()['outputs']
    div_outfile = os.path.join(plot_output_dir, 'monitor_name',
                               filename_of_interest + "_component.html")
    script_outfile = os.path.join(plot_output_dir, 'monitor_name',
                                  filename_of_interest + "_component.js")

    for outfile, component in zip([div_outfile, script_outfile],
                                  [div, script]):
        with open(outfile, 'w') as f:
            f.write(component)
        set_permissions(outfile)

    # Perform any other necessary code
    well_named_variable = "Function does something."
    result_of_second_function = second_function(well_named_variable)
Example #17
def generate_preview_images():
    """The main function of the ``generate_preview_image`` module."""

    # Begin logging
    logging.info("Beginning the script run")

    filesystem = get_config()['filesystem']
    preview_image_filesystem = get_config()['preview_image_filesystem']
    thumbnail_filesystem = get_config()['thumbnail_filesystem']

    filenames = glob(os.path.join(filesystem, '*/*.fits'))
    grouped_filenames = group_filenames(filenames)
    logging.info(f"Found {len(filenames)} filenames")

    for file_list in grouped_filenames:
        filename = file_list[0]
        # Determine the save location
        try:
            identifier = 'jw{}'.format(filename_parser(filename)['program_id'])
        except ValueError:
            identifier = os.path.basename(filename).split('.fits')[0]

        preview_output_directory = os.path.join(preview_image_filesystem,
                                                identifier)
        thumbnail_output_directory = os.path.join(thumbnail_filesystem,
                                                  identifier)

        # Check to see if the preview images already exist and skip
        # if they do
        file_exists = check_existence(file_list, preview_output_directory)
        if file_exists:
            logging.info(
                "JPG already exists for {}, skipping.".format(filename))
            continue

        # Create the output directories if necessary
        if not os.path.exists(preview_output_directory):
            os.makedirs(preview_output_directory)
            permissions.set_permissions(preview_output_directory)
            logging.info(f'Created directory {preview_output_directory}')
        if not os.path.exists(thumbnail_output_directory):
            os.makedirs(thumbnail_output_directory)
            permissions.set_permissions(thumbnail_output_directory)
            logging.info(f'Created directory {thumbnail_output_directory}')

        # If the exposure contains more than one file (because more
        # than one detector was used), then create a mosaic
        max_size = 8
        numfiles = len(file_list)
        if numfiles != 1:
            try:
                mosaic_image, mosaic_dq = create_mosaic(file_list)
                logging.info('Created mosaic for:')
                for item in file_list:
                    logging.info(f'\t{item}')
            except (ValueError, FileNotFoundError) as error:
                logging.error(error)
                continue  # skip this exposure if the mosaic could not be built
            dummy_file = create_dummy_filename(file_list)
            if numfiles in [2, 4]:
                max_size = 16
            elif numfiles == 8:
                max_size = 32

        # Create the nominal preview image and thumbnail
        try:
            im = PreviewImage(filename, "SCI")
            im.clip_percent = 0.01
            im.scaling = 'log'
            im.cmap = 'viridis'
            im.output_format = 'jpg'
            im.preview_output_directory = preview_output_directory
            im.thumbnail_output_directory = thumbnail_output_directory

            # If a mosaic was made from more than one file,
            # insert it and its associated DQ array into the
            # instance of PreviewImage. Also set the input
            # filename to indicate that we have mosaicked data
            if numfiles != 1:
                im.data = mosaic_image
                im.dq = mosaic_dq
                im.file = dummy_file

            im.make_image(max_img_size=max_size)
        except ValueError as error:
            logging.warning(error)

    # Complete logging:
    logging.info("Completed.")
Example #18
Dependencies
------------

    Users must have a ``config.json`` configuration file with a proper
    ``connection_string`` key that points to the ``jwqldb`` database.
    The ``connection_string`` format is
    ``postgresql+psycopg2://user:password@host:port/database``.
"""

from jwql.database.database_interface import base, set_read_permissions
from jwql.utils.utils import get_config


if __name__ == '__main__':

    connection_string = get_config()['connection_string']
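    # First character of the host portion of the connection string;
    # production hosts are assumed to begin with 'p'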
    server_type = connection_string.split('@')[-1][0]

    assert server_type != 'p', 'Cannot reset production database!'

    prompt = ('About to reset all tables for database instance {}. Do you '
              'wish to proceed? (y/n)\n'.format(connection_string))
    response = input(prompt)

    if response.lower() == 'y':
        base.metadata.drop_all()
        base.metadata.create_all()
        set_read_permissions()
        print('\nDatabase instance {} has been reset'.format(connection_string))
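The first-character test on the host is terse; below is a sketch of the same production guard made more explicit, assuming SQLAlchemy is available (``make_url`` parses a connection string into its components):

from sqlalchemy.engine.url import make_url

url = make_url(connection_string)
# Production hosts are assumed to begin with 'p'
assert not url.host.startswith('p'), 'Cannot reset production database!'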
Example #19
import jwql.instrument_monitors.miri_monitors.data_trending.utils.mnemonics as mn
import jwql.instrument_monitors.miri_monitors.data_trending.utils.sql_interface as sql
import jwql.instrument_monitors.miri_monitors.data_trending.utils.csv_to_AstropyTable as apt
from jwql.instrument_monitors.miri_monitors.data_trending.utils.process_data import whole_day_routine, wheelpos_routine
from jwql.utils.utils import get_config, filename_parser

import os
import glob
import statistics
import sqlite3

# Set the __location__ variable
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

# Files with data to initially fill the database
directory = os.path.join(get_config()['outputs'], 'miri_data_trending', 'trainings_data_day', '*.CSV')
paths = glob.glob(directory)


def process_file(conn, path):
    '''Parse a CSV file, process the data within, and write it to the database.

    Parameters
    ----------
    conn : DBobject
        Connection object to temporary database
    path : str
        Path of the CSV file to read
    '''

    m_raw_data = apt.mnemonics(path)
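# Hypothetical driver loop (an assumption; the rest of the original
# module is not shown in this snippet): open a temporary SQLite
# database and feed each training CSV found above to process_file
conn = sqlite3.connect(os.path.join(__location__, 'miri_database.db'))  # assumed filename
for path in paths:
    process_file(conn, path)
conn.close()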
Example #20
from bokeh.palettes import Category20_20 as palette
from bokeh.plotting import figure, output_file, save

from jwql.database.database_interface import engine
from jwql.database.database_interface import session
from jwql.database.database_interface import FilesystemGeneral
from jwql.database.database_interface import FilesystemInstrument
from jwql.database.database_interface import CentralStore
from jwql.utils.logging_functions import configure_logging, log_info, log_fail
from jwql.utils.permissions import set_permissions
from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE
from jwql.utils.utils import filename_parser
from jwql.utils.utils import get_config
from jwql.utils.monitor_utils import initialize_instrument_monitor, update_monitor_table

SETTINGS = get_config()
FILESYSTEM = SETTINGS['filesystem']
PROPRIETARY_FILESYSTEM = os.path.join(FILESYSTEM, 'proprietary')
PUBLIC_FILESYSTEM = os.path.join(FILESYSTEM, 'public')
CENTRAL = SETTINGS['jwql_dir']
OUTPUTS = SETTINGS['outputs']


def gather_statistics(general_results_dict, instrument_results_dict):
    """Walks the filesytem to gather various statistics to eventually
    store in the database

    Parameters
    ----------
    general_results_dict : dict
        A dictionary for the ``filesystem_general`` database table
Example #21
import os
import re
import tempfile

from astropy.io import fits
from astropy.table import Table
from astropy.time import Time
from django.conf import settings
import numpy as np
from operator import itemgetter

# astroquery.mast import that depends on value of auth_mast
# this import has to be made before any other import of astroquery.mast
from jwql.utils.utils import get_config, filename_parser, check_config_for_key
check_config_for_key('auth_mast')
auth_mast = get_config()['auth_mast']
mast_flavour = '.'.join(auth_mast.split('.')[1:])
from astropy import config
conf = config.get_config('astroquery')
conf['mast'] = {'server': 'https://{}'.format(mast_flavour)}
from astroquery.mast import Mast
from jwedb.edb_interface import mnemonic_inventory

from jwql.database import database_interface as di
from jwql.database.database_interface import load_connection
from jwql.edb.engineering_database import get_mnemonic, get_mnemonic_info
from jwql.instrument_monitors.miri_monitors.data_trending import dashboard as miri_dash
from jwql.instrument_monitors.nirspec_monitors.data_trending import dashboard as nirspec_dash
from jwql.jwql_monitors import monitor_cron_jobs
from jwql.utils.utils import ensure_dir_exists
from jwql.utils.constants import MONITORS, JWST_INSTRUMENT_NAMES_MIXEDCASE
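A usage sketch for the engineering database interface imported above; the mnemonic identifier, the time range, and the ``get_mnemonic`` signature are assumptions for illustration:

# Query one EDB mnemonic over a time range (identifier and times are
# illustrative only; Time is imported from astropy.time above)
mnemonic = get_mnemonic('IMIR_HK_ICE_SEC_VOLT4',
                        start_time=Time('2019-01-16 00:00:00'),
                        end_time=Time('2019-01-16 06:00:00'))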
Example #22
def status(production_mode=True):
    """Main function: determine the status of the instrument montiors
    by examining log files.

    Parameters
    ----------
    production_mode : bool
        If ``True``, look in the main log directory. If ``False``, look
        in the ``dev`` log file directory.

    Returns
    -------
    logfile_status : dict
        Nested dictionary containing the status for all monitors. Top
        level keys include all monitors. Within a given monitor, the
        value is a dictionary containing 'missing_file' and 'status'
        keys. 'missing_file' is a boolean describing whether or not
        there is a suspected missing log file based on the timestamps
        of the existing files. 'status' is a string that is either
        'success' or 'failure'.
    """
    # Begin logging
    logging.info("Beginning cron job status monitor")

    # Get main logfile path
    log_path = get_config()['log_dir']

    # If we are in development mode, the log files are in a slightly
    # different location than in production mode
    if production_mode:
        log_path = os.path.join(log_path, 'prod')
    else:
        log_path = os.path.join(log_path, 'dev')

    # Set up a dictionary to keep track of results
    logfile_status = {}

    # Get a list of the directories under the main logging directory.
    generator = os.walk(log_path, topdown=True)

    # Loop over monitors
    for subdir, subsubdir, filenames in generator:
        # When running in production mode, skip the 'dev' subdirectory,
        # as it contains the development version of the monitor logs
        if production_mode:
            subsubdir[:] = [dirname for dirname in subsubdir if dirname != 'dev']

        if len(filenames) > 0:
            monitor_name = subdir.split('/')[-1]

            # Skip monitor_cron_jobs itself
            if monitor_name != 'monitor_cron_jobs':

                log_file_list = [os.path.join(subdir, filename) for filename in filenames]

                # Find the cadence of the monitor
                delta_time, stdev_time = get_cadence(log_file_list)

                # Identify the most recent log file
                latest_log, latest_log_time = find_latest(log_file_list)

                # Check to see if we expect a file more recent than the latest
                missing_file = missing_file_check(delta_time, stdev_time, latest_log)
                if missing_file:
                    logging.warning('Expected a more recent {} logfile than {}'
                                    .format(monitor_name, os.path.basename(latest_log)))

                # Check the file for success/failure
                result = success_check(latest_log)
                logging.info('{}: Latest log file indicates {}'.format(monitor_name, result))

                # Add results to the dictionary
                logfile_status[monitor_name] = {'logname': os.path.basename(latest_log),
                                                'latest_time': latest_log_time,
                                                'missing_file': missing_file, 'status': result}

    # Create table of results using Bokeh
    create_table(logfile_status)
    logging.info('Cron job status monitor completed successfully.')
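The helpers ``get_cadence``, ``find_latest``, ``missing_file_check``, and ``success_check`` are not shown here. A minimal sketch of a ``missing_file_check``-style test, assuming the cadence values are in seconds (hypothetical; the real jwql helper may differ):

import os
import time

def missing_file_check(delta_time, stdev_time, latest_log):
    """Flag a suspected missing log if the newest file is much older
    than the typical cadence."""
    age = time.time() - os.path.getmtime(latest_log)
    return age > delta_time + 3 * stdev_time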
Example #23
def get_image_info(file_root, rewrite):
    """Build and return a dictionary containing information for a given
    ``file_root``.

    Parameters
    ----------
    file_root : str
        The rootname of the file of interest (e.g.
        ``jw86600008001_02101_00007_guider2``).
    rewrite : bool
        ``True`` if the corresponding JPEG needs to be rewritten,
        ``False`` if not.

    Returns
    -------
    image_info : dict
        A dictionary containing various information for the given
        ``file_root``.
    """

    # Initialize dictionary to store information
    image_info = {}
    image_info['all_jpegs'] = []
    image_info['suffixes'] = []
    image_info['num_ints'] = {}

    preview_dir = os.path.join(get_config()['jwql_dir'], 'preview_images')

    # Find all of the matching files
    dirname = file_root[:7]
    search_filepath = os.path.join(FILESYSTEM_DIR, dirname,
                                   file_root + '*.fits')
    image_info['all_files'] = glob.glob(search_filepath)

    for file in image_info['all_files']:

        # Get suffix information
        suffix = os.path.basename(file).split('_')[4].split('.')[0]
        image_info['suffixes'].append(suffix)

        # Determine JPEG file location
        jpg_dir = os.path.join(preview_dir, dirname)
        jpg_filename = os.path.basename(
            os.path.splitext(file)[0] + '_integ0.jpg')
        jpg_filepath = os.path.join(jpg_dir, jpg_filename)

        # Check whether a JPEG already exists. If it does (and
        # rewrite=False), just use the existing file
        if os.path.exists(jpg_filepath) and not rewrite:
            pass

        # If it doesn't, make it using the preview_image module
        else:
            if not os.path.exists(jpg_dir):
                os.makedirs(jpg_dir)
            im = PreviewImage(file, 'SCI')
            im.output_directory = jpg_dir
            im.make_image()

        # Record how many integrations there are per filetype
        search_jpgs = os.path.join(preview_dir, dirname,
                                   file_root + '_{}_integ*.jpg'.format(suffix))
        num_jpgs = len(glob.glob(search_jpgs))
        image_info['num_ints'][suffix] = num_jpgs

        image_info['all_jpegs'].append(jpg_filepath)

    return image_info
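A brief usage sketch, reusing the example rootname from the docstring:

image_info = get_image_info('jw86600008001_02101_00007_guider2', rewrite=False)
print(image_info['suffixes'], image_info['num_ints'])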
Example #24
from bokeh.palettes import Category20_20 as palette
from bokeh.plotting import figure, output_file, save

from jwql.database.database_interface import engine
from jwql.database.database_interface import session
from jwql.database.database_interface import FilesystemGeneral
from jwql.database.database_interface import FilesystemInstrument
from jwql.database.database_interface import CentralStore
from jwql.utils.logging_functions import configure_logging, log_info, log_fail
from jwql.utils.permissions import set_permissions
from jwql.utils.constants import FILE_SUFFIX_TYPES, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE
from jwql.utils.utils import filename_parser
from jwql.utils.utils import get_config
from jwql.utils.monitor_utils import initialize_instrument_monitor, update_monitor_table

FILESYSTEM = get_config()['filesystem']
PROPRIETARY_FILESYSTEM = os.path.join(FILESYSTEM, 'proprietary')
PUBLIC_FILESYSTEM = os.path.join(FILESYSTEM, 'public')
CENTRAL = get_config()['jwql_dir']


def gather_statistics(general_results_dict, instrument_results_dict):
    """Walks the filesytem to gather various statistics to eventually
    store in the database

    Parameters
    ----------
    general_results_dict : dict
        A dictionary for the ``filesystem_general`` database table
    instrument_results_dict : dict
        A dictionary for the ``filesystem_instrument`` database table