# Code example #1
def validate_read_noise(results, det_names):
    """Validate and persist read noise results."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)

        results_file = f'{file_prefix}_eotest_results.fits'
        if not os.path.isfile(results_file):
            # No data for this detector; note it and continue with the
            # others.  Missing detectors are reported at the end.
            missing_det_names.add(det_name)
            continue

        # Emit one read-noise schema entry per amplifier.
        eotest_results = sensorTest.EOTestResults(results_file)
        noise_columns = zip(eotest_results['AMP'],
                            eotest_results['READ_NOISE'],
                            eotest_results['SYSTEM_NOISE'],
                            eotest_results['TOTAL_NOISE'])
        for amp, read_noise, system_noise, total_noise in noise_columns:
            results.append(lcatr.schema.valid(
                lcatr.schema.get('read_noise_BOT'),
                amp=amp, read_noise=read_noise, system_noise=system_noise,
                total_noise=total_noise, slot=slot, raft=raft))

        # Tag the per-detector read noise FITS files and persist them.
        fits_files = glob.glob(f'{file_prefix}_read_noise?*.fits')
        for fits_file in fits_files:
            eotestUtils.addHeaderData(fits_file, TESTTYPE='FE55',
                                      DATE=eotestUtils.utc_now_isoformat())
        results.extend(siteUtils.make_fileref(item) for item in fits_files)

        # Persist the png files.
        png_metadata = dict(DETECTOR=det_name, TESTTYPE='FE55',
                            TEST_CATEGORY='EO', RUN=run)
        results.extend(siteUtils.persist_png_files(
            f'{file_prefix}_correlated_noise.png', file_prefix,
            metadata=png_metadata))

    # Persist the raft-level overscan correlation plots.
    for raft_name in camera_info.get_installed_raft_names():
        raft_prefix = make_file_prefix(run, raft_name)
        results.extend(siteUtils.persist_png_files(
            f'{raft_prefix}_overscan_correlations.png', raft_prefix,
            metadata=dict(TESTTYPE='FE55', TEST_CATEGORY='EO',
                          RAFT=raft_name, RUN=run)))

    report_missing_data("validate_read_noise", missing_det_names)

    return results
#!/usr/bin/env ipython
"""
Producer script for BOT scan mode analysis.
"""
import os
from camera_components import camera_info
from scan_mode_analysis_jh_task import scan_mode_analysis_jh_task
from bot_eo_analyses import get_analysis_types, run_python_task_or_cl_script

if 'scan' in get_analysis_types():
    # Path to the command-line fallback script for the scan mode
    # analysis harnessed job.
    cl_script = os.path.join(os.environ['EOANALYSISJOBSDIR'],
                             'harnessed_jobs', 'scan_mode_analysis_BOT',
                             'v0', 'scan_mode_analysis_jh_task.py')
    # Run the task over the installed rafts, either as a python task
    # or via the command-line script.
    run_python_task_or_cl_script(
        scan_mode_analysis_jh_task, cl_script,
        device_names=camera_info.get_installed_raft_names())
# Code example #3
def run_jh_tasks(*jh_tasks, device_names=None, processes=None, walltime=3600):
    """
    Run functions to execute tasks under the job harness in parallel.

    Each function should take a device name as its only argument; the
    parallelization takes place over device_names.

    Parameters
    ----------
    jh_tasks: list-like container of functions
        These functions are serialized and dispatched to workers on
        remote nodes, so all dependencies should be imported in the
        bodies of the functions.
    device_names: list-like container of device names [None]
        List of sensors or rafts on which to operate.  If None, then
        the installed sensors in the focal plane are used.
    processes: int [None]
        Number of processes to run in parallel. If None, then all
        available processes can be potentially used.
    walltime: float [3600]
        Walltime in seconds for python app execution.  If the python app
        does not return within walltime, a parsl.app.errors.AppTimeout
        exception will be thrown.

    Raises
    ------
    parsl.app.errors.AppTimeout

    Notes
    -----
    Because the number of jh_task functions can vary, the keyword
    arguments must be referenced explicitly by name; one cannot rely
    on keyword position to pass those values.
    """
    if device_names is None:
        device_names = camera_info.get_det_names()

    # Restrict to installed rafts or sensors.  This function call
    # also writes the camera_info cache file for the eT db query.
    installed_rafts = camera_info.get_installed_raft_names()

    # The lcatr.cfg file can override the raft list via LCATR_RAFTS.
    override_rafts = os.environ.get('LCATR_RAFTS', None)
    if override_rafts is not None:
        installed_rafts = override_rafts.split('_')

    # The first three characters of a device name identify its raft.
    device_names = [name for name in device_names
                    if name[:3] in installed_rafts]

    cwd = os.path.abspath('.')

    # Query eT database for file paths from a previous run, if
    # specified, and store in a pickle file.  Also query for file
    # paths for other analysis runs, if specified in the
    # bot_eo_config_file.
    hj_fp_server = siteUtils.HarnessedJobFilePaths()
    for analysis_type in ('badpixel', 'bias', 'dark', 'linearity',
                          'nonlinearity'):
        hj_fp_server.query_file_paths(
            siteUtils.get_analysis_run(analysis_type))

    with open('hj_fp_server.pkl', 'wb') as output:
        pickle.dump(hj_fp_server, output)

    # Create a GetAmplifierGains object in order to query the eT
    # database for gain results from previous runs and write a pickle
    # file that can be loaded locally from disk by the various jh
    # tasks being run in parallel to avoid eT db access contention.
    GetAmplifierGains()

    for jh_task in jh_tasks:
        # Sleep 30 seconds before launching jh_task processes in
        # parallel so that parsl process_pool_workers from the
        # previous set of jh_task processes can finish.
        time.sleep(30)
        run_device_analysis_pool(jh_task, device_names,
                                 processes=processes, cwd=cwd,
                                 walltime=walltime)
# Code example #4
def validate_tearing(results, det_names):
    """
    Validate the tearing analysis results.

    Parameters
    ----------
    results: list
        List of lcatr.schema entries and filerefs to which the tearing
        results are appended.
    det_names: list-like container of str
        Detector names, e.g., 'R22_S11'.

    Returns
    -------
    The updated results list.
    """
    run = siteUtils.getRunNumber()
    schema = lcatr.schema.get('tearing_detection_BOT')
    amps_schema = lcatr.schema.get('tearing_stats_BOT')
    # Fetch this schema once up front, consistent with the other
    # schema lookups, instead of re-fetching it for every raft in the
    # loop below.
    bot_schema = lcatr.schema.get('divisadero_tearing_BOT')
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)

        tearing_results_file = '%s_tearing_stats.pickle' % file_prefix
        if not os.path.isfile(tearing_results_file):
            # No data for this detector; report it at the end.
            missing_det_names.add(det_name)
            continue
        results.append(siteUtils.make_fileref(tearing_results_file))
        with open(tearing_results_file, 'rb') as input_:
            tearing_stats, amp_counts = pickle.load(input_)
        # Per-subset tearing detection entries.
        for values in tearing_stats:
            stats = dict(
                zip(('job_name', 'subset', 'sensor_id', 'detections', 'slot',
                     'raft'),
                    list(values) + [slot, raft]))
            results.append(lcatr.schema.valid(schema, **stats))
        # Per-amp tearing detection counts.
        for amp, detections in amp_counts.items():
            results.append(
                lcatr.schema.valid(amps_schema,
                                   amp=amp,
                                   slot=slot,
                                   raft=raft,
                                   tearing_detections=detections))

    png_files = sorted(glob.glob('*_tearing.png'))
    results.extend(persist_tearing_png_files(png_files))

    # Persist the raft-level divisidero tearing plots and json files.
    missing_raft_names = set()
    for raft_name in camera_info.get_installed_raft_names():
        try:
            divisidero_plot = glob.glob(f'{raft_name}_*_divisidero.png')[0]
        except IndexError:
            missing_raft_names.add(raft_name)
            continue

        md = dict(DATA_PRODUCT='divisidero_tearing_plot', LsstId=raft_name)
        results.append(siteUtils.make_fileref(divisidero_plot, metadata=md))

        try:
            divisidero_json_file \
                = glob.glob(f'{raft_name}*max_divisidero.json')[0]
        except IndexError:
            missing_raft_names.add(raft_name)
            continue

        with open(divisidero_json_file, 'r') as fd:
            max_devs = json.load(fd)
        results.append(siteUtils.make_fileref(divisidero_json_file))

        for slot, values in max_devs.items():
            # Weed out nans and infinities, replacing them with zero
            # so that only finite values enter the schema entries.
            max_dev_values = [value if np.isfinite(value) else 0
                              for value in values]
            # Top half of CCD (amps 1-8): each amp gets the max of the
            # two adjacent entries in the zero-padded deviation list.
            my_devs = max_dev_values[:7]
            for amp, devs in enumerate(zip([0] + my_devs, my_devs + [0]), 1):
                results.append(
                    lcatr.schema.valid(bot_schema,
                                       amp=amp,
                                       slot=slot,
                                       raft=raft_name,
                                       divisadero_max_dev=max(devs)))
            if len(max_dev_values) == 7:
                # This is a WF sensor, so there is no bottom half.
                continue
            # Bottom half of CCD (amps 9-16), traversed in reverse.
            my_devs = max_dev_values[7:]
            my_devs.reverse()
            for amp, devs in enumerate(zip([0] + my_devs, my_devs + [0]), 9):
                results.append(
                    lcatr.schema.valid(bot_schema,
                                       amp=amp,
                                       slot=slot,
                                       raft=raft_name,
                                       divisadero_max_dev=max(devs)))

    report_missing_data("validate_tearing", missing_det_names)
    report_missing_data("validate_tearing",
                        sorted(list(missing_raft_names)),
                        components='rafts',
                        total=25)
    return results
# Code example #5
def validate_flat_pairs(results, det_names):
    """Validate the flat pair analysis results."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        det_resp_data = f'{file_prefix}_det_response.fits'
        if not os.path.isfile(det_resp_data):
            # No data for this detector; report it at the end.
            missing_det_names.add(det_name)
            continue
        eotestUtils.addHeaderData(det_resp_data, DETECTOR=det_name,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(det_resp_data))

        # Emit one flat-pairs schema entry per amplifier.
        data = sensorTest.EOTestResults(f'{file_prefix}_eotest_results.fits')
        columns = zip(data['AMP'],
                      data['MAX_OBSERVED_SIGNAL'],
                      data['MAX_FRAC_DEV'],
                      data['ROW_MEAN_VAR_SLOPE'],
                      data['LINEARITY_TURNOFF'])
        for (amp, max_observed_signal, max_frac_dev, row_mean_var_slope,
             linearity_turnoff) in columns:
            results.append(
                lcatr.schema.valid(lcatr.schema.get('flat_pairs_BOT'),
                                   amp=amp,
                                   max_observed_signal=max_observed_signal,
                                   max_frac_dev=max_frac_dev,
                                   row_mean_var_slope=row_mean_var_slope,
                                   linearity_turnoff=linearity_turnoff,
                                   slot=slot,
                                   raft=raft))

        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')
        results.extend(siteUtils.persist_png_files(
            f'{file_prefix}_linearity*.png', file_prefix, metadata=metadata))
        results.extend(siteUtils.persist_png_files(
            f'{file_prefix}_row_means_variance.png', file_prefix,
            metadata=metadata))

    # Persist the raft-level imaging region correlation plots.
    missing_raft_names = set()
    for raft_name in camera_info.get_installed_raft_names():
        raft_prefix = make_file_prefix(run, raft_name)
        png_file = f'{raft_prefix}_imaging_region_correlations.png'
        if not os.path.isfile(png_file):
            missing_raft_names.add(raft_name)
            continue
        results.extend(siteUtils.persist_png_files(
            png_file, raft_prefix,
            metadata=dict(TESTTYPE='FLAT', TEST_CATEGORY='EO',
                          RAFT=raft_name, RUN=run)))

    # Persist any pd correction file specified in the lcatr.cfg file.
    pd_corrections_file = os.environ.get('LCATR_PD_CORRECTIONS_FILE')
    if pd_corrections_file is not None:
        shutil.copy(pd_corrections_file, '.')
        results.append(
            siteUtils.make_fileref(os.path.basename(pd_corrections_file)))

    report_missing_data("validate_flat_pairs", missing_det_names)
    report_missing_data("validate_flat_pairs",
                        sorted(list(missing_raft_names)),
                        components='rafts',
                        total=21)

    return results
#!/usr/bin/env ipython
"""
Validator script for BOT raft-level results summaries.
"""
import glob
import lcatr.schema
import siteUtils
from camera_components import camera_info
from bot_eo_validators import validate_raft_results

# Collect the raft-level summary results for all installed rafts.
results = validate_raft_results([],
                                camera_info.get_installed_raft_names())

#
# Validate focal plane heat map plots
#
unit_id = siteUtils.getUnitId()
run = siteUtils.getRunNumber()
heat_map_pngs = glob.glob(f'{unit_id}_{run}*.png')
results.extend(siteUtils.persist_png_files('', unit_id,
                                           png_files=heat_map_pngs))

# Append job info, then write and validate the results file.
results.extend(siteUtils.jobInfo())
lcatr.schema.write_file(results)
lcatr.schema.validate_file()