Example #1
def validate_read_noise(results, det_names):
    """Validate and persist read noise results."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)

        read_noise_file = '%s_eotest_results.fits' % file_prefix
        if not os.path.isfile(read_noise_file):
            # No data for this detector, so note that and continue
            # with the others.
            missing_det_names.add(det_name)
            continue
        data = sensorTest.EOTestResults(read_noise_file)
        amps = data['AMP']
        read_noise_data = data['READ_NOISE']
        system_noise_data = data['SYSTEM_NOISE']
        total_noise_data = data['TOTAL_NOISE']
        for amp, read_noise, system_noise, total_noise \
            in zip(amps, read_noise_data, system_noise_data, total_noise_data):
            results.append(lcatr.schema.valid(
                lcatr.schema.get('read_noise_BOT'),
                amp=amp, read_noise=read_noise, system_noise=system_noise,
                total_noise=total_noise, slot=slot, raft=raft))

        files = glob.glob('%s_read_noise?*.fits' % file_prefix)
        for fitsfile in files:
            eotestUtils.addHeaderData(fitsfile, TESTTYPE='FE55',
                                      DATE=eotestUtils.utc_now_isoformat())

        data_products = [siteUtils.make_fileref(item) for item in files]
        results.extend(data_products)

        # Persist the png files.
        metadata = dict(DETECTOR=det_name, TESTTYPE='FE55', TEST_CATEGORY='EO',
                        RUN=run)
        filename = '%s_correlated_noise.png' % file_prefix
        results.extend(siteUtils.persist_png_files(filename, file_prefix,
                                                   metadata=metadata))

    # Persist the raft-level overscan correlation plots.
    for raft in camera_info.get_installed_raft_names():
        metadata = dict(TESTTYPE='FE55', TEST_CATEGORY='EO', RAFT=raft, RUN=run)
        file_prefix = make_file_prefix(run, raft)
        filename = '%s_overscan_correlations.png' % file_prefix
        results.extend(siteUtils.persist_png_files(filename, file_prefix,
                                                   metadata=metadata))

    report_missing_data("validate_read_noise", missing_det_names)

    return results
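
A minimal driver sketch, not part of the original listing, showing how one of these validate_* functions is typically wired into the harness. It reuses the module-level siteUtils, lcatr, and camera_info names the validators above rely on; siteUtils.jobInfo() and the lcatr.schema.write_file/validate_file entry points are assumed from the usual harness pattern.

def example_read_noise_validator():
    """Hedged sketch: build the detector list, run one validator, persist."""
    det_names = ['_'.join((raft, slot))
                 for raft in camera_info.get_installed_raft_names()
                 for slot in camera_info.get_slot_names()]
    results = []
    results = validate_read_noise(results, det_names)
    results.extend(siteUtils.jobInfo())   # assumed harness helper
    lcatr.schema.write_file(results)      # assumed lcatr entry points
    lcatr.schema.validate_file()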
Example #2
def flat_gain_stability_jh_task(det_name):
    """JH version of single sensor execution of the flat pairs task."""
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        bias_filename, flat_gain_stability_task,\
        get_mask_files, medianed_dark_frame

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    flat_files = siteUtils.dependency_glob(glob_pattern('tearing', det_name),
                                           acq_jobname=acq_jobname)
    if not flat_files:
        print("flat_gain_stability_task: Flat pairs files not found for",
              det_name)
        return None

    mask_files = get_mask_files(det_name)
    bias_frame = bias_filename(run, det_name)
    dark_frame = medianed_dark_frame(det_name)

    return flat_gain_stability_task(run,
                                    det_name,
                                    flat_files,
                                    mask_files=mask_files,
                                    bias_frame=bias_frame,
                                    dark_frame=dark_frame)
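
Each *_jh_task in this listing operates on a single detector; a hypothetical dispatcher, not the project's actual task runner, could fan one of them out over a process pool:

import multiprocessing

def run_flat_gain_stability(det_names, processes=4):
    """Hedged sketch: map the single-sensor JH task over many detectors."""
    with multiprocessing.Pool(processes=processes) as pool:
        return pool.map(flat_gain_stability_jh_task, det_names)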
def validate_overscan(results, det_names):
    """Validate the overscan analysis results."""
    run = siteUtils.getRunNumber()
    results = []
    missing_det_names = set()
    for det_name in det_names:
        file_prefix = make_file_prefix(run, det_name)
        results_file = f'{file_prefix}_overscan_results.fits'
        if not os.path.isfile(results_file):
            missing_det_names.add(det_name)
        else:
            md = dict(DATA_PRODUCT='overscan_task_results',
                      RUN=run,
                      DETECTOR=det_name)
            results.append(siteUtils.make_fileref(results_file, metadata=md))
        png_files = (glob.glob(f'{file_prefix}_*_eper_*.png') +
                     glob.glob(f'{file_prefix}_*_overscan_*.png') +
                     glob.glob(f'{file_prefix}_*_cti.png'))
        md = dict(TEST_CATEGORY='EO', DETECTOR=det_name, RUN=run)
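        # Pass the explicit png file list; the glob pattern argument is
        # intentionally left empty.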
        results.extend(
            siteUtils.persist_png_files('',
                                        file_prefix,
                                        png_files=png_files,
                                        metadata=md))
    report_missing_data('validate_overscan', missing_det_names)
    return results
def dark_defects_jh_task(det_name):
    """JH version of single sensor execution of the dark defects task."""
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, bias_filename, dark_defects_task, get_mask_files

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    sflat_files \
        = siteUtils.dependency_glob(glob_pattern('dark_defects', det_name),
                                    acq_jobname=acq_jobname)
    if not sflat_files:
        print("dark_defects_task: No high flux superflat files found for",
              det_name)
        return None

    mask_files = sorted(glob.glob(f'{file_prefix}*mask*.fits'))
    bias_frame = bias_filename(run, det_name)

    return dark_defects_task(run,
                             det_name,
                             sflat_files,
                             mask_files=mask_files,
                             bias_frame=bias_frame)
def persistence_jh_task(det_name):
    """JH version of the persistence_task."""
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern, \
        bias_frame_task, get_mask_files, get_bot_eo_config, persistence_task

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)

    acq_jobname = siteUtils.getProcessName('BOT_acq')
    bias_files \
        = siteUtils.dependency_glob(glob_pattern('persistence_bias', det_name),
                                    acq_jobname=acq_jobname,
                                    description='Persistence bias frames:')
    dark_files \
        = siteUtils.dependency_glob(glob_pattern('persistence_dark', det_name),
                                    acq_jobname=acq_jobname,
                                    description='Persistence dark frames:')
    if not bias_files or not dark_files:
        print("persistence_task: Needed data files are missing for detector",
              det_name)
        return None

    # Sort by test sequence number, i.e., by filenames.
    bias_files = sorted(bias_files)
    dark_files = sorted(dark_files)

    # Make a superbias frame using the pre-exposure persistence bias
    # files, skipping the first exposure.
    superbias_frame = f'{file_prefix}_persistence_superbias.fits'
    bias_frame_task(run, det_name, bias_files, bias_frame=superbias_frame)

    return persistence_task(run, det_name, dark_files, superbias_frame,
                            get_mask_files(det_name))
def bf_jh_task(det_name):
    """JH version of single sensor execution of the brighter-fatter task."""
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        bias_filename, bf_task, find_flat2_bot, get_mask_files,\
        get_amplifier_gains

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    flat_files \
        = siteUtils.dependency_glob(glob_pattern('brighter_fatter', det_name),
                                    acq_jobname=acq_jobname)

    if not flat_files:
        print("bf_jh_task: Flat pairs files not found for detector", det_name)
        return None

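    # Keep only the flat1 exposure of each pair; bf_task locates the
    # matching flat2 frame via the find_flat2_bot callback passed below.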
    flat_files = [_ for _ in flat_files if 'flat1' in _]

    mask_files = get_mask_files(det_name)
    eotest_results_file = '{}_eotest_results.fits'.format(file_prefix)
    gains = get_amplifier_gains(eotest_results_file)
    bias_frame = bias_filename(run, det_name)

    return bf_task(run,
                   det_name,
                   flat_files,
                   gains,
                   mask_files=mask_files,
                   flat2_finder=find_flat2_bot,
                   bias_frame=bias_frame)
Example #7
def dark_current_jh_task(det_name):
    """JH version of single sensor execution of the dark current task."""
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, bias_filename, dark_current_task,\
        plot_ccd_total_noise, get_mask_files

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    dark_files \
        = siteUtils.dependency_glob(glob_pattern('dark_current', det_name),
                                    acq_jobname=acq_jobname,
                                    description="Dark current frames:")
    if not dark_files:
        print("dark_current_task: No dark files found for detector", det_name)
        return None

    mask_files = get_mask_files(det_name)
    eotest_results_file \
        = siteUtils.dependency_glob('{}_eotest_results.fits'.format(file_prefix),
                                    jobname='read_noise_BOT')[0]
    gains = get_amplifier_gains('{}_eotest_results.fits'.format(file_prefix))
    bias_frame = bias_filename(run, det_name)

    dark_curr_pixels, dark95s \
        = dark_current_task(run, det_name, dark_files, gains,
                            mask_files=mask_files, bias_frame=bias_frame)
    plot_ccd_total_noise(run, det_name, dark_curr_pixels, dark95s,
                         eotest_results_file)
    return dark_curr_pixels, dark95s
def validate_tearing(results, det_names):
    """Validate the tearing analysis results."""
    run = siteUtils.getRunNumber()
    schema = lcatr.schema.get('tearing_detection_BOT')
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)

        tearing_results_file = '%s_tearing_stats.pkl' % file_prefix
        if not os.path.isfile(tearing_results_file):
            missing_det_names.append(det_name)
            continue
        with open(tearing_results_file, 'rb') as input_:
            tearing_stats = pickle.load(input_)
        for values in tearing_stats:
            stats = dict(kv for kv in zip(('job_name', 'subset', 'sensor_id',
                                           'detections', 'slot', 'raft'),
                                          list(values) + [slot, raft]))
            results.append(lcatr.schema.valid(schema, **stats))

    png_files = sorted(glob.glob('*_tearing.png'))
    results.extend(persist_tearing_png_files(png_files))

    report_missing_data("validate_tearing", missing_det_names)

    return results
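
The dict built in validate_tearing assumes each unpickled tearing_stats entry is a 4-element sequence of (job_name, subset, sensor_id, detections); a standalone illustration with synthetic values:

# Synthetic values, only to illustrate the per-entry layout assumed above.
values = ('tearing_detection_BOT', 'all', 'R22_S11', 2)
slot, raft = 'S11', 'R22'
stats = dict(zip(('job_name', 'subset', 'sensor_id', 'detections',
                  'slot', 'raft'), list(values) + [slot, raft]))
# stats['detections'] -> 2, stats['raft'] -> 'R22'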
def validate_traps(results, det_names):
    """Validate and persist trap results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        trap_file = '%s_traps.fits' % file_prefix
        if not os.path.isfile(trap_file):
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(trap_file,
                                  TESTTYPE='TRAP',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(trap_file))

        mask_file = '%s_traps_mask.fits' % file_prefix
        results.append(siteUtils.make_fileref(mask_file))

        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        num_traps = data['NUM_TRAPS']

        for amp, ntrap in zip(amps, num_traps):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('traps_BOT'),
                                   amp=amp,
                                   num_traps=ntrap,
                                   slot=slot,
                                   raft=raft))

    report_missing_data("validate_traps", missing_det_names)

    return results
def ptc_jh_task(det_name):
    """JH version of single sensor execution of the PTC task."""
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, bias_filename, ptc_task, get_mask_files

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    flat_files = siteUtils.dependency_glob(glob_pattern('ptc', det_name),
                                           acq_jobname=acq_jobname)
    if not flat_files:
        print("ptc_task: Flat pairs files not found for detector", det_name)
        return None

    mask_files = get_mask_files(det_name)
    eotest_results_file = '{}_eotest_results.fits'.format(file_prefix)
    gains = get_amplifier_gains(eotest_results_file)
    bias_frame = bias_filename(run, det_name)

    return ptc_task(run,
                    det_name,
                    flat_files,
                    gains,
                    mask_files=mask_files,
                    bias_frame=bias_frame)
def read_noise_jh_task(det_name):
    """JH version of the single sensor read noise task."""
    import os
    import glob
    import logging
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, read_noise_task, get_mask_files

    logger = logging.getLogger('read_noise_jh_task')
    logger.setLevel(logging.INFO)

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')
    nbias = int(os.environ.get('LCATR_NUM_BIAS_FRAMES', 10))  # env values are strings

    bias_files \
        = siteUtils.dependency_glob(glob_pattern('read_noise', det_name),
                                    acq_jobname=acq_jobname)[:nbias]
    if not bias_files:
        logger.info(
            "read_noise_task: Needed data files are missing "
            "for detector %s", det_name)
        return None
    eotest_results_file = '{}_eotest_results.fits'.format(file_prefix)
    gains = get_amplifier_gains(eotest_results_file)

    mask_files = get_mask_files(det_name)

    return read_noise_task(run,
                           det_name,
                           bias_files,
                           gains,
                           mask_files=mask_files)
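
Usage note for the bias-frame cap in read_noise_jh_task: environment values arrive as strings, hence the int() conversion above. A hypothetical override looks like this (the detector name is made up):

import os
os.environ['LCATR_NUM_BIAS_FRAMES'] = '25'   # parsed to 25 by the task
read_noise_jh_task('R22_S11')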
def validate_flat_gain_stability(results, det_names):
    """Valdiate the output files from the flat_gain_stability analysis"""
    if 'gainstability' not in get_analysis_types():
        return results

    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        file_prefix = make_file_prefix(run, det_name)
        results_file = f'{file_prefix}_flat_signal_sequence.pickle'
        if not os.path.isfile(results_file):
            missing_det_names.add(det_name)
        else:
            md = dict(DATA_PRODUCT='flat_gain_stability_results')
            results.append(siteUtils.make_fileref(results_file, metadata=md))

    report_missing_data('validate_flat_gain_stability', missing_det_names)

    unit_id = siteUtils.getUnitId()
    png_files = glob.glob('*flat_gain_stability.png')
    for png_file in png_files:
        md = dict(DATA_PRODUCT='flat_gain_stability_plot')
        if unit_id in png_file:
            md['LsstId'] = unit_id
        results.append(siteUtils.make_fileref(png_file, metadata=md))

    return results
def validate_bias_frame(results, det_names):
    """Validate and persist medianed bias frames."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        file_prefix = make_file_prefix(run, det_name)
        bias_frame = f'{file_prefix}_median_bias.fits'
        rolloff_mask = f'{file_prefix}_edge_rolloff_mask.fits'
        pca_bias_file = f'{file_prefix}_pca_bias.fits'
        pca_superbias = f'{file_prefix}_pca_superbias.fits'

        # Add/update the metadata to the primary HDU of these files.
        for fitsfile in (bias_frame, rolloff_mask, pca_bias_file,
                         pca_superbias):
            if os.path.isfile(fitsfile):
                eotestUtils.addHeaderData(fitsfile,
                                          TESTTYPE='BIAS',
                                          DATE=eotestUtils.utc_now_isoformat())
                results.append(lcatr.schema.fileref.make(fitsfile))
            else:
                missing_det_names.add(det_name)

        # Persist the PCA bias model file.
        pca_bias_model = f'{file_prefix}_pca_bias.pickle'
        if os.path.isfile(pca_bias_model):
            results.append(lcatr.schema.fileref.make(pca_bias_model))
        else:
            missing_det_names.add(det_name)
    report_missing_data('validate_bias_frame', missing_det_names)
    return results
Example #14
def bright_defects_jh_task(det_name):
    """JH version of single sensor bright pixels task."""
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, bias_filename, bright_defects_task, get_mask_files

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    dark_files \
        = siteUtils.dependency_glob(glob_pattern('bright_defects', det_name),
                                    acq_jobname=acq_jobname)
    if not dark_files:
        print("bright_defects_task: Needed data files missing for detector",
              det_name)
        return None

    eotest_results_file = '{}_eotest_results.fits'.format(file_prefix)
    gains = get_amplifier_gains(eotest_results_file)
    mask_files = sorted(glob.glob(f'{file_prefix}*mask*.fits'))
    bias_frame = bias_filename(run, det_name)

    return bright_defects_task(run,
                               det_name,
                               dark_files,
                               gains,
                               mask_files=mask_files,
                               bias_frame=bias_frame)
def validate_raft_results(results, raft_names):
    """Validate the raft level results."""
    run = siteUtils.getRunNumber()
    slot_names = camera_info.get_slot_names()
    md = siteUtils.DataCatalogMetadata(ORIGIN=siteUtils.getSiteName(),
                                       TEST_CATEGORY='EO',
                                       DATA_PRODUCT='EOTEST_RESULTS')
    missing_raft_names = []
    for raft_name in raft_names:
        for slot_name in slot_names:
            det_name = '_'.join((raft_name, slot_name))
            file_prefix = make_file_prefix(run, det_name)
            results_file = '{}_eotest_results.fits'.format(file_prefix)
            if not os.path.isfile(results_file):
                if raft_name not in missing_raft_names:
                    missing_raft_names.append(raft_name)
                continue
            eotestUtils.addHeaderData(results_file,
                                      DETECTOR=det_name,
                                      DATE=eotestUtils.utc_now_isoformat(),
                                      RUNNUM=run)
            results.append(
                lcatr.schema.fileref.make(results_file,
                                          metadata=md(SLOT=slot_name,
                                                      RAFT=raft_name)))

        # Persist the png files.
        png_file_list = '{}_raft_results_task_png_files.txt'.format(raft_name)
        if not os.path.isfile(png_file_list):
            continue
        with open(png_file_list, 'r') as input_:
            png_files = [x.strip() for x in input_]
        metadata = dict(TEST_CATEGORY='EO', DETECTOR=det_name, RUN=run)
        results.extend(
            siteUtils.persist_png_files('',
                                        file_prefix,
                                        png_files=png_files,
                                        metadata=metadata))

    report_missing_data("validate_raft_results",
                        missing_raft_names,
                        components='rafts',
                        total=21)

    return results
def validate_qe(results, det_names):
    """Validate the QE results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)

        qe_results_file = '%s_QE.fits' % file_prefix
        if not os.path.isfile(qe_results_file):
            missing_det_names.append(det_name)
            continue
        with fits.open(qe_results_file) as qe_results:
            qe_data = qe_results['QE_BANDS'].data
            QE = OrderedDict((band, []) for band in qe_data.field('BAND'))
            for amp in range(1, 17):
                values = qe_data.field('AMP%02i' % amp)
                for band, value in zip(QE, values):
                    QE[band].append(value)

        for band in QE:
            for amp in range(1, 17):
                results.append(
                    lcatr.schema.valid(lcatr.schema.get('qe_BOT_analysis'),
                                       band=band,
                                       QE=QE[band][amp - 1],
                                       amp=amp,
                                       slot=slot,
                                       raft=raft))

        qe_files = glob.glob('%s_*QE*.fits' % file_prefix)
        for item in qe_files:
            eotestUtils.addHeaderData(item,
                                      TESTTYPE='LAMBDA',
                                      DATE=eotestUtils.utc_now_isoformat())
        results.extend([siteUtils.make_fileref(item) for item in qe_files])

        # Persist the png files.
        metadata = dict(DETECTOR=det_name,
                        RUN=run,
                        TESTTYPE='LAMBDA',
                        TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('%s*qe.png' % file_prefix,
                                        file_prefix,
                                        metadata=metadata))
        results.extend(
            siteUtils.persist_png_files('%s*flat.png' % file_prefix,
                                        file_prefix,
                                        metadata=metadata))

    report_missing_data("validate_qe", missing_det_names)

    return results
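
For reference, a synthetic illustration of the band-keyed layout validate_qe builds before flattening it into per-amp schema entries (band names and values are placeholders):

from collections import OrderedDict

bands = ('u', 'g', 'r')                          # placeholder band names
QE = OrderedDict((band, []) for band in bands)
for amp in range(1, 17):
    for band in QE:
        QE[band].append(100.0)                   # placeholder QE value
# QE[band][amp - 1] is the value persisted for each band/amp pair.
assert all(len(values) == 16 for values in QE.values())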
def validate_brighter_fatter(results, det_names):
    """Validate the brighter-fatter results."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        bf_results = '%s_bf.fits' % file_prefix
        if not os.path.isfile(bf_results):
            missing_det_names.add(det_name)
            continue
        eotestUtils.addHeaderData(bf_results,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())

        results.append(siteUtils.make_fileref(bf_results))

        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)

        columns = (data['AMP'], data['BF_XCORR'], data['BF_XCORR_ERR'],
                   data['BF_YCORR'], data['BF_YCORR_ERR'], data['BF_SLOPEX'],
                   data['BF_SLOPEX_ERR'], data['BF_SLOPEY'],
                   data['BF_SLOPEY_ERR'], data['BF_MEAN'])
        for amp, bf_xcorr, bf_xcorr_err, bf_ycorr, bf_ycorr_err, \
            bf_slopex, bf_slopex_err, bf_slopey, bf_slopey_err, bf_mean \
            in zip(*columns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('brighter_fatter_BOT'),
                                   amp=amp,
                                   bf_xcorr=bf_xcorr,
                                   bf_xcorr_err=bf_xcorr_err,
                                   bf_ycorr=bf_ycorr,
                                   bf_ycorr_err=bf_ycorr_err,
                                   bf_slopex=bf_slopex,
                                   bf_slopex_err=bf_slopex_err,
                                   bf_slopey=bf_slopey,
                                   bf_slopey_err=bf_slopey_err,
                                   bf_mean=bf_mean,
                                   slot=slot,
                                   raft=raft))

        # Persist the png files.
        metadata = dict(DETECTOR=det_name,
                        RUN=run,
                        TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')

        results.extend(
            siteUtils.persist_png_files('%s*brighter-fatter.png' % file_prefix,
                                        file_prefix,
                                        metadata=metadata))

    report_missing_data("validate_brighter_fatter", missing_det_names)

    return results
def validate_bias_stability(results, det_names):
    """Validate bias stability results."""
    run = siteUtils.getRunNumber()
    rafts = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        rafts.add(raft)
        file_prefix = make_file_prefix(run, det_name)
        profile_plots = f'{file_prefix}_bias_serial_profiles.png'
        if not os.path.isfile(profile_plots):
            continue
        md = dict(raft=raft, slot=slot, run=run)
        results.append(siteUtils.make_fileref(profile_plots, metadata=md))
    for raft in rafts:
        file_prefix = make_file_prefix(run, raft)
        stats_file = f'{file_prefix}_bias_frame_stats.pickle'
        if not os.path.isfile(stats_file):
            continue
        md = dict(raft=raft, run=run)
        results.append(siteUtils.make_fileref(stats_file, metadata=md))
    return results
def validate_ptc(results, det_names):
    """Validate the PTC results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        ptc_results = '%s_ptc.fits' % file_prefix
        if not os.path.isfile(ptc_results):
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(ptc_results,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())

        results.append(siteUtils.make_fileref(ptc_results))

        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)

        columns = (data['AMP'], data['PTC_GAIN'], data['PTC_GAIN_ERROR'],
                   data['PTC_A00'], data['PTC_A00_ERROR'], data['PTC_NOISE'],
                   data['PTC_NOISE_ERROR'], data['PTC_TURNOFF'])
        for amp, gain, gain_error, a00, a00_error,\
            noise, noise_error, turnoff in zip(*columns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('ptc_BOT'),
                                   amp=amp,
                                   ptc_gain=gain,
                                   ptc_gain_error=gain_error,
                                   ptc_a00=a00,
                                   ptc_a00_error=a00_error,
                                   ptc_noise=noise,
                                   ptc_noise_error=noise_error,
                                   ptc_turnoff=turnoff,
                                   slot=slot,
                                   raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name,
                        RUN=run,
                        TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')

        results.extend(
            siteUtils.persist_png_files('%s*ptcs.png' % file_prefix,
                                        file_prefix,
                                        metadata=metadata))

    report_missing_data("validate_ptc", missing_det_names)

    return results
Example #20
def validate_flat_pairs(results, det_names):
    """Validate the flat pair analysis results."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        det_resp_data = '%s_det_response.fits' % file_prefix
        if not os.path.isfile(det_resp_data):
            missing_det_names.add(det_name)
            continue
        eotestUtils.addHeaderData(det_resp_data, DETECTOR=det_name,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(det_resp_data))

        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        max_observed_signal_data = data['MAX_OBSERVED_SIGNAL']
        max_frac_dev_data = data['MAX_FRAC_DEV']
        row_mean_var_slope_data = data['ROW_MEAN_VAR_SLOPE']
        linearity_turnoff_data = data['LINEARITY_TURNOFF']

        for amp, max_observed_signal, max_frac_dev, row_mean_var_slope, \
            linearity_turnoff in zip(amps, max_observed_signal_data,
                                     max_frac_dev_data,
                                     row_mean_var_slope_data,
                                     linearity_turnoff_data):
            results.append(lcatr.schema.valid(
                lcatr.schema.get('flat_pairs_BOT'),
                amp=amp, max_observed_signal=max_observed_signal,
                max_frac_dev=max_frac_dev,
                row_mean_var_slope=row_mean_var_slope,
                linearity_turnoff=linearity_turnoff, slot=slot, raft=raft))

        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run,
                        TESTTYPE='FLAT', TEST_CATEGORY='EO')
        results.extend(siteUtils.persist_png_files(('%s_linearity*.png'
                                                    % file_prefix),
                                                   file_prefix,
                                                   metadata=metadata))
        results.extend(siteUtils.persist_png_files(('%s_row_means_variance.png'
                                                    % file_prefix),
                                                   file_prefix,
                                                   metadata=metadata))

    report_missing_data("validate_flat_pairs", missing_det_names)

    return results
def validate_dark_defects(results, det_names):
    """Validate and persist dark defects results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        mask_file = '%s_dark_pixel_mask.fits' % file_prefix
        if not os.path.isfile(mask_file):
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(mask_file,
                                  TESTTYPE='SFLAT_500',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(mask_file))

        superflat = '%s_median_sflat.fits' % file_prefix
        eotestUtils.addHeaderData(superflat,
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(superflat))

        eotest_results = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(eotest_results)
        amps = data['AMP']
        npixels = data['NUM_DARK_PIXELS']
        ncolumns = data['NUM_DARK_COLUMNS']
        for amp, npix, ncol in zip(amps, npixels, ncolumns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('dark_defects_BOT'),
                                   amp=amp,
                                   dark_pixels=npix,
                                   dark_columns=ncol,
                                   slot=slot,
                                   raft=raft))

        # Persist the png files.
        metadata = dict(DETECTOR=det_name,
                        RUN=run,
                        TESTTYPE='SFLAT_500',
                        TEST_CATEGORY='EO')
        filename = '%s_superflat_dark_defects.png' % file_prefix
        results.extend(
            siteUtils.persist_png_files(filename,
                                        file_prefix,
                                        metadata=metadata))

    report_missing_data("validate_dark_defects", missing_det_names)

    return results
Example #22
def flat_pairs_jh_task(det_name):
    """JH version of single sensor execution of the flat pairs task."""
    import os
    import glob
    import siteUtils
    import json
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, bias_filename, flat_pairs_task, mondiode_value,\
        get_mask_files, medianed_dark_frame

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    flat_files \
        = siteUtils.dependency_glob(glob_pattern('flat_pairs', det_name),
                                    acq_jobname=acq_jobname)
    if not flat_files:
        print("flat_pairs_task: Flat pairs files not found for detector",
              det_name)
        return None

    mask_files = get_mask_files(det_name)
    eotest_results_file = '{}_eotest_results.fits'.format(file_prefix)
    gains = get_amplifier_gains(eotest_results_file)
    bias_frame = bias_filename(run, det_name)
    dark_frame = medianed_dark_frame(det_name)

    if 'LCATR_PD_CORRECTIONS_FILE' in os.environ:
        filter_corr_file = os.environ['LCATR_PD_CORRECTIONS_FILE']
    else:
        filter_corr_file = os.path.join(os.environ['EOANALYSISJOBSDIR'],
                                        'data', 'pd_filter_corrections.json')

    print('Using pd_filter_corrections file:', filter_corr_file, flush=True)

    with open(filter_corr_file) as fd:
        filter_corrections = json.load(fd)

    return flat_pairs_task(run,
                           det_name,
                           flat_files,
                           gains,
                           mask_files=mask_files,
                           bias_frame=bias_frame,
                           mondiode_func=mondiode_value,
                           dark_frame=dark_frame,
                           filter_corrections=filter_corrections)
def validate_flat_pairs(results, det_names):
    """Validate the flat pair analysis results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        det_resp_data = '%s_det_response.fits' % file_prefix
        if not os.path.isfile(det_resp_data):
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(det_resp_data,
                                  DETECTOR=det_name,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(det_resp_data))

        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        full_well_data = data['FULL_WELL']
        max_frac_dev_data = data['MAX_FRAC_DEV']

        for amp, full_well, max_frac_dev in zip(amps, full_well_data,
                                                max_frac_dev_data):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('flat_pairs_BOT'),
                                   amp=amp,
                                   full_well=full_well,
                                   max_frac_dev=max_frac_dev,
                                   slot=slot,
                                   raft=raft))

        # Persist the png files.
        metadata = dict(DETECTOR=det_name,
                        RUN=run,
                        TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files(('%s_linearity*.png' % file_prefix),
                                        file_prefix,
                                        metadata=metadata))

    report_missing_data("validate_flat_pairs", missing_det_names)

    return results
def validate_scan(results, det_names):
    """Validate scan mode analysis results."""
    run = siteUtils.getRunNumber()
    rafts = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        rafts.add(raft)
        file_prefix = make_file_prefix(run, det_name)
        disp_files = glob.glob('{}_*_dispersion.png'.format(file_prefix))
        for item in disp_files:
            md = dict(raft=raft, slot=slot, run=run)
            results.append(siteUtils.make_fileref(item, metadata=md))
    for raft in rafts:
        multiscope_files = glob.glob('{}_{}_*multiscope.png'.format(raft, run))
        for item in multiscope_files:
            md = dict(raft=raft, run=run)
            results.append(siteUtils.make_fileref(item, metadata=md))
    return results
def validate_dark_current(results, det_names):
    """Validate and persist dark current results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        results_file = '%s_eotest_results.fits' % file_prefix
        if not os.path.isfile(results_file):
            missing_det_names.append(det_name)
            continue
        data = sensorTest.EOTestResults(results_file)

        amps = data['AMP']
        dc95s = data['DARK_CURRENT_95']
        for amp, dc95 in zip(amps, dc95s):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('dark_current_BOT'),
                                   amp=amp,
                                   dark_current_95CL=dc95,
                                   slot=slot,
                                   raft=raft))

        # Persist the png files.
        metadata = dict(TESTTYPE='DARK',
                        TEST_CATEGORY='EO',
                        DETECTOR=det_name,
                        RUN=run)

        pattern = '{}_noise.png'.format(file_prefix)
        results.extend(
            siteUtils.persist_png_files(pattern,
                                        file_prefix,
                                        metadata=metadata))
        pattern = '{}_total_noise_hists.png'.format(file_prefix)
        results.extend(
            siteUtils.persist_png_files(pattern,
                                        file_prefix,
                                        metadata=metadata))

    report_missing_data("validate_dark_current", missing_det_names)

    return results
def dark_current_jh_task(det_name):
    """JH version of single sensor execution of the dark current task."""
    from collections import defaultdict
    from astropy.io import fits
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, bias_filename, dark_current_task,\
        plot_ccd_total_noise, get_mask_files
    from bot_data_handling import most_common_dark_files

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    dark_files \
        = siteUtils.dependency_glob(glob_pattern('dark_current', det_name),
                                    acq_jobname=acq_jobname,
                                    description="Dark current frames:")
    if not dark_files:
        print("dark_current_task: No dark files found for detector", det_name)
        return None

    dark_files_linear_fit = list(dark_files)
    dark_files = most_common_dark_files(dark_files)
    if len(dark_files_linear_fit) == len(dark_files):
        # These data only have one integration time, so skip linear
        # fit of dark current signal vs integration time.
        dark_files_linear_fit = None

    mask_files = get_mask_files(det_name)
    eotest_results_file \
        = siteUtils.dependency_glob('{}_eotest_results.fits'.format(file_prefix),
                                    jobname='read_noise_BOT')[0]
    gains = get_amplifier_gains('{}_eotest_results.fits'.format(file_prefix))
    bias_frame = bias_filename(run, det_name)

    dark_curr_pixels, dark95s \
        = dark_current_task(run, det_name, dark_files, gains,
                            mask_files=mask_files, bias_frame=bias_frame,
                            dark_files_linear_fit=dark_files_linear_fit)
    plot_ccd_total_noise(run, det_name, dark_curr_pixels, dark95s,
                         eotest_results_file)
    return dark_curr_pixels, dark95s
Example #27
def nonlinearity_jh_task(det_name):
    """JH version of single sensor execution of the non-linearity task."""
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, nonlinearity_task

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)

    try:
        detresp_file \
            = siteUtils.dependency_glob(f'{det_name}*_det_response.fits',
                                        jobname='flat_pairs_BOT')[0]
    except IndexError:
        print("nonlinearity_task: detector response file not found for",
              det_name)
        return None

    outfile = f'{file_prefix}_nlc.fits'
    return nonlinearity_task(run, det_name, detresp_file, outfile)
Example #28
def validate_persistence(results, det_names):
    """Validate the persistence analysis results."""
    run = siteUtils.getRunNumber()
    results = []
    missing_det_names = set()
    for det_name in det_names:
        file_prefix = make_file_prefix(run, det_name)
        data_file = f'{file_prefix}_persistence_data.pickle'
        if not os.path.isfile(data_file):
            missing_det_names.add(det_name)
            continue
        md = dict(DATA_PRODUCT='persistence_task_results', RUN=run,
                  DETECTOR=det_name)
        results.append(siteUtils.make_fileref(data_file, metadata=md))
        png_files = [f'{file_prefix}_persistence.png']
        md = dict(TEST_CATEGORY='EO', DETECTOR=det_name, RUN=run)
        results.extend(siteUtils.persist_png_files('', file_prefix,
                                                   png_files=png_files,
                                                   metadata=md))
    report_missing_data('validate_persistence', missing_det_names)
    return results
Example #29
def traps_jh_task(det_name):
    """JH version of single sensor execution of the traps analysis task."""
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        get_amplifier_gains, bias_filename, traps_task, get_mask_files

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    trap_files = siteUtils.dependency_glob(glob_pattern('traps', det_name),
                                           acq_jobname=acq_jobname)
    if not trap_files:
        print("traps_task: No pocket pumping file found for detector",
              det_name)
        return None
    trap_file = trap_files[0]

    mask_files = get_mask_files(det_name)

    # Omit rolloff defects mask since a trap in the rolloff edge region can
    # affect the entire column.
    mask_files \
        = [item for item in mask_files if item.find('edge_rolloff') == -1]

    eotest_results_file = '{}_eotest_results.fits'.format(file_prefix)
    gains = get_amplifier_gains(eotest_results_file)

    bias_frame = bias_filename(run, det_name)

    return traps_task(run,
                      det_name,
                      trap_file,
                      gains,
                      mask_files=mask_files,
                      bias_frame=bias_frame)
Example #30
def raft_jh_signal_correlations(raft_name):
    """JH version of raft-level signal-correlation analysis."""
    import os
    import logging
    import numpy as np
    import matplotlib.pyplot as plt
    import siteUtils
    from bot_eo_analyses import bias_filename, make_file_prefix, \
        append_acq_run, glob_pattern
    from signal_correlations import raft_level_signal_correlations

    logger = logging.getLogger('raft_jh_signal_correlations')
    logger.setLevel(logging.INFO)

    # Use the high signal superflat files.
    pattern = glob_pattern('raft_signal_correlations', f'{raft_name}_S??')
    acq_jobname = siteUtils.getProcessName('BOT_acq')
    sflat_files = siteUtils.dependency_glob(pattern, acq_jobname=acq_jobname)
    folders = sorted(
        list(set([os.path.basename(os.path.dirname(_)) for _ in sflat_files])))
    logger.info(f'folders: {folders}')
    if not folders:
        logger.info('No data found for this raft, so skip the '
                    'signal correlation analysis.')
        return

    flat1_files = dict()
    flat2_files = dict()
    for item in sflat_files:
        folder = os.path.basename(os.path.dirname(item))
        if folder not in folders[:2]:
            continue
        logger.info(f'item: {item}')
        logger.info(f'folder: {folder}')
        basename = os.path.basename(item)
        logger.info(f'basename: {basename}')
        slot = basename.split('_')[-1][:-len('.fits')]
        if folder == folders[0]:
            flat1_files[slot] = item
        elif folder == folders[1]:
            flat2_files[slot] = item
    logger.info('flat pair files:')
    for slot in flat1_files:
        logger.info('  ' + flat1_files[slot])
        logger.info('  ' + flat2_files[slot])

    # Find the median bias files for the target raft.
    run = siteUtils.getRunNumber()
    bias_files = dict()
    for slot in flat1_files.keys():
        det_name = '_'.join((raft_name, slot))
        bias_files[slot] = bias_filename(run, det_name)

    file_prefix = make_file_prefix(run, raft_name)
    title = append_acq_run("Imaging region correlations, "
                           f"Run {run}, {raft_name}")
    raft_level_signal_correlations(flat1_files,
                                   flat2_files,
                                   bias_files,
                                   title=title)
    plt.savefig('{}_imaging_region_correlations.png'.format(file_prefix))
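
The folder and slot bookkeeping in raft_jh_signal_correlations reduces to simple path manipulation; a standalone illustration with a made-up file path:

import os

# Made-up path, only to illustrate the parsing used above.
item = '/acq/sflat_high_000/R22_S11.fits'
folder = os.path.basename(os.path.dirname(item))   # 'sflat_high_000'
basename = os.path.basename(item)                  # 'R22_S11.fits'
slot = basename.split('_')[-1][:-len('.fits')]     # 'S11'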