def validate_qe(results, det_names):
    """Validate the QE results.

    For each detector in det_names, read the per-band, per-amp QE
    values from <file_prefix>_QE.fits, append qe_BOT_analysis schema
    entries and file references to `results`, and persist the QE/flat
    png files.  Detectors lacking a QE results file are reported via
    report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        qe_results_file = '%s_QE.fits' % file_prefix
        if not os.path.isfile(qe_results_file):
            # No data for this detector; note it and continue with
            # the others.
            missing_det_names.append(det_name)
            continue
        with fits.open(qe_results_file) as qe_results:
            qe_data = qe_results['QE_BANDS'].data
            # Map each band to its list of per-amp QE values, filled
            # in amp order below (amps 1-16).
            QE = OrderedDict((band, []) for band in qe_data.field('BAND'))
            for amp in range(1, 17):
                values = qe_data.field('AMP%02i' % amp)
                for band, value in zip(QE, values):
                    QE[band].append(value)
        for band in QE:
            for amp in range(1, 17):
                # QE[band] is indexed 0-15; amps are numbered 1-16.
                results.append(
                    lcatr.schema.valid(lcatr.schema.get('qe_BOT_analysis'),
                                       band=band, QE=QE[band][amp - 1],
                                       amp=amp, slot=slot, raft=raft))
        qe_files = glob.glob('%s_*QE*.fits' % file_prefix)
        for item in qe_files:
            eotestUtils.addHeaderData(item, TESTTYPE='LAMBDA',
                                      DATE=eotestUtils.utc_now_isoformat())
        results.extend([siteUtils.make_fileref(item) for item in qe_files])
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='LAMBDA',
                        TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('%s*qe.png' % file_prefix,
                                        file_prefix, metadata=metadata))
        results.extend(
            siteUtils.persist_png_files('%s*flat.png' % file_prefix,
                                        file_prefix, metadata=metadata))
    report_missing_data("validate_qe", missing_det_names)
    return results
def validate_read_noise(results, det_names):
    """Validate and persist read noise results.

    Appends read_noise_BOT schema entries (per amp) and file
    references for each detector in det_names, persists the
    correlated-noise png files, then persists the raft-level overscan
    correlation plots for every installed raft.  Detectors with no
    eotest results file are reported via report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        read_noise_file = '%s_eotest_results.fits' % file_prefix
        if not os.path.isfile(read_noise_file):
            # No data for this detector, so note that and continue
            # with the others.
            missing_det_names.add(det_name)
            continue
        data = sensorTest.EOTestResults(read_noise_file)
        amps = data['AMP']
        read_noise_data = data['READ_NOISE']
        system_noise_data = data['SYSTEM_NOISE']
        total_noise_data = data['TOTAL_NOISE']
        for amp, read_noise, system_noise, total_noise \
                in zip(amps, read_noise_data, system_noise_data,
                       total_noise_data):
            results.append(lcatr.schema.valid(
                lcatr.schema.get('read_noise_BOT'), amp=amp,
                read_noise=read_noise, system_noise=system_noise,
                total_noise=total_noise, slot=slot, raft=raft))
        files = glob.glob('%s_read_noise?*.fits' % file_prefix)
        for fitsfile in files:
            eotestUtils.addHeaderData(fitsfile, TESTTYPE='FE55',
                                      DATE=eotestUtils.utc_now_isoformat())
        data_products = [siteUtils.make_fileref(item) for item in files]
        results.extend(data_products)
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, TESTTYPE='FE55',
                        TEST_CATEGORY='EO', RUN=run)
        filename = '%s_correlated_noise.png' % file_prefix
        results.extend(siteUtils.persist_png_files(filename, file_prefix,
                                                   metadata=metadata))
    # Persist the raft-level overscan correlation plots.
    # NOTE(review): this loop appears to run once per installed raft
    # after the detector loop; it rebinds `file_prefix` — confirm the
    # intended nesting against the original file layout.
    for raft in camera_info.get_installed_raft_names():
        metadata = dict(TESTTYPE='FE55', TEST_CATEGORY='EO', RAFT=raft,
                        RUN=run)
        file_prefix = make_file_prefix(run, raft)
        filename = '%s_overscan_correlations.png' % file_prefix
        results.extend(siteUtils.persist_png_files(filename, file_prefix,
                                                   metadata=metadata))
    report_missing_data("validate_read_noise", missing_det_names)
    return results
def validate_flat_pairs(results, det_names):
    """Validate the flat pair analysis results.

    Appends flat_pairs_BOT schema entries (per amp) derived from each
    detector's eotest results file, a file reference for the detector
    response file, and the linearity / row-means-variance png files.
    Detectors with no detector-response file are reported via
    report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        det_resp_data = '%s_det_response.fits' % file_prefix
        if not os.path.isfile(det_resp_data):
            # No data for this detector; note it and continue.
            missing_det_names.add(det_name)
            continue
        eotestUtils.addHeaderData(det_resp_data, DETECTOR=det_name,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(det_resp_data))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        max_observed_signal_data = data['MAX_OBSERVED_SIGNAL']
        max_frac_dev_data = data['MAX_FRAC_DEV']
        row_mean_var_slope_data = data['ROW_MEAN_VAR_SLOPE']
        linearity_turnoff_data = data['LINEARITY_TURNOFF']
        for amp, max_observed_signal, max_frac_dev, row_mean_var_slope, \
            linearity_turnoff in zip(amps, max_observed_signal_data,
                                     max_frac_dev_data,
                                     row_mean_var_slope_data,
                                     linearity_turnoff_data):
            results.append(lcatr.schema.valid(
                lcatr.schema.get('flat_pairs_BOT'), amp=amp,
                max_observed_signal=max_observed_signal,
                max_frac_dev=max_frac_dev,
                row_mean_var_slope=row_mean_var_slope,
                linearity_turnoff=linearity_turnoff,
                slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')
        results.extend(siteUtils.persist_png_files(
            ('%s_linearity*.png' % file_prefix), file_prefix,
            metadata=metadata))
        results.extend(siteUtils.persist_png_files(
            ('%s_row_means_variance.png' % file_prefix), file_prefix,
            metadata=metadata))
    report_missing_data("validate_flat_pairs", missing_det_names)
    return results
def validate_overscan(results, det_names):
    """Validate the overscan analysis results.

    Appends a file reference for each detector's overscan results
    file plus the associated eper/overscan/cti png files to
    `results`.  Detectors with no overscan results file are reported
    via report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    # Fix: the original body rebound `results = []` here, silently
    # discarding any entries the caller had accumulated.  Every other
    # validator in this module appends to the list passed in, so the
    # rebinding has been removed for consistency.
    missing_det_names = set()
    for det_name in det_names:
        file_prefix = make_file_prefix(run, det_name)
        results_file = f'{file_prefix}_overscan_results.fits'
        if not os.path.isfile(results_file):
            missing_det_names.add(det_name)
        else:
            md = dict(DATA_PRODUCT='overscan_task_results', RUN=run,
                      DETECTOR=det_name)
            results.append(siteUtils.make_fileref(results_file, metadata=md))
            # Gather the serial/parallel eper, overscan, and cti plots.
            png_files = (glob.glob(f'{file_prefix}_*_eper_*.png')
                         + glob.glob(f'{file_prefix}_*_overscan_*.png')
                         + glob.glob(f'{file_prefix}_*_cti.png'))
            md = dict(TEST_CATEGORY='EO', DETECTOR=det_name, RUN=run)
            results.extend(
                siteUtils.persist_png_files('', file_prefix,
                                            png_files=png_files,
                                            metadata=md))
    report_missing_data('validate_overscan', missing_det_names)
    return results
def validate_dark_current(results, det_names):
    """Validate and persist dark current results.

    Appends a dark_current_BOT schema entry per amplifier for each
    detector with an eotest results file, and persists the noise png
    files.  Detectors without results are reported as missing.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        results_file = '%s_eotest_results.fits' % file_prefix
        if not os.path.isfile(results_file):
            # Results file absent for this detector: record and skip.
            missing_det_names.append(det_name)
            continue
        eotest_data = sensorTest.EOTestResults(results_file)
        # One schema entry per amplifier with its 95% CL dark current.
        for amp, dc95 in zip(eotest_data['AMP'],
                             eotest_data['DARK_CURRENT_95']):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('dark_current_BOT'),
                                   amp=amp, dark_current_95CL=dc95,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(TESTTYPE='DARK', TEST_CATEGORY='EO',
                        DETECTOR=det_name, RUN=run)
        for pattern in ('{}_noise.png'.format(file_prefix),
                        '{}_total_noise_hists.png'.format(file_prefix)):
            results.extend(
                siteUtils.persist_png_files(pattern, file_prefix,
                                            metadata=metadata))
    report_missing_data("validate_dark_current", missing_det_names)
    return results
def validate_brighter_fatter(results, det_names):
    """Validate the brighter-fatter results.

    Appends brighter_fatter_BOT schema entries (per amp), a file
    reference for each detector's bf.fits output, and the
    brighter-fatter png files to `results`.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        bf_results = '%s_bf.fits' % file_prefix
        if not os.path.isfile(bf_results):
            # No data for this detector; note it and continue.
            missing_det_names.add(det_name)
            continue
        eotestUtils.addHeaderData(bf_results, TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(bf_results))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        columns = (data['AMP'], data['BF_XCORR'], data['BF_XCORR_ERR'],
                   data['BF_YCORR'], data['BF_YCORR_ERR'],
                   data['BF_SLOPEX'], data['BF_SLOPEX_ERR'],
                   data['BF_SLOPEY'], data['BF_SLOPEY_ERR'],
                   data['BF_MEAN'])
        for amp, bf_xcorr, bf_xcorr_err, bf_ycorr, bf_ycorr_err, \
            bf_slopex, bf_slopex_err, bf_slopey, bf_slopey_err, bf_mean \
            in zip(*columns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('brighter_fatter_BOT'),
                                   amp=amp, bf_xcorr=bf_xcorr,
                                   bf_xcorr_err=bf_xcorr_err,
                                   bf_ycorr=bf_ycorr,
                                   bf_ycorr_err=bf_ycorr_err,
                                   bf_slopex=bf_slopex,
                                   bf_slopex_err=bf_slopex_err,
                                   bf_slopey=bf_slopey,
                                   bf_slopey_err=bf_slopey_err,
                                   bf_mean=bf_mean, slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files(
                '%s*brighter-fatter.png' % file_prefix,
                file_prefix, metadata=metadata))
    # Fix: previously missing_det_names was collected but never
    # reported, unlike every other validator in this module.
    report_missing_data("validate_brighter_fatter", missing_det_names)
    return results
def validate_ptc(results, det_names):
    """Validate the PTC results.

    Appends ptc_BOT schema entries (per amp) from each detector's
    eotest results file, a file reference for the ptc.fits output,
    and the ptcs png files to `results`.  Detectors lacking the
    ptc.fits file are reported via report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        ptc_results = '%s_ptc.fits' % file_prefix
        if not os.path.isfile(ptc_results):
            # No data for this detector; note it and continue.
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(ptc_results, TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(ptc_results))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        # Per-amp PTC fit results, unpacked in lockstep below.
        columns = (data['AMP'], data['PTC_GAIN'], data['PTC_GAIN_ERROR'],
                   data['PTC_A00'], data['PTC_A00_ERROR'],
                   data['PTC_NOISE'], data['PTC_NOISE_ERROR'],
                   data['PTC_TURNOFF'])
        for amp, gain, gain_error, a00, a00_error,\
            noise, noise_error, turnoff in zip(*columns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('ptc_BOT'),
                                   amp=amp, ptc_gain=gain,
                                   ptc_gain_error=gain_error,
                                   ptc_a00=a00, ptc_a00_error=a00_error,
                                   ptc_noise=noise,
                                   ptc_noise_error=noise_error,
                                   ptc_turnoff=turnoff,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('%s*ptcs.png' % file_prefix,
                                        file_prefix, metadata=metadata))
    report_missing_data("validate_ptc", missing_det_names)
    return results
def validate_dark_defects(results, det_names):
    """Validate and persist dark defects results.

    Appends dark_defects_BOT schema entries (per amp), file
    references for the dark pixel mask and median superflat, and the
    superflat dark-defects png to `results`.  Detectors with no mask
    file are reported via report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        mask_file = '%s_dark_pixel_mask.fits' % file_prefix
        if not os.path.isfile(mask_file):
            # No data for this detector; note it and continue.
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(mask_file, TESTTYPE='SFLAT_500',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(mask_file))
        superflat = '%s_median_sflat.fits' % file_prefix
        eotestUtils.addHeaderData(superflat,
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(superflat))
        eotest_results = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(eotest_results)
        amps = data['AMP']
        npixels = data['NUM_DARK_PIXELS']
        ncolumns = data['NUM_DARK_COLUMNS']
        for amp, npix, ncol in zip(amps, npixels, ncolumns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('dark_defects_BOT'),
                                   amp=amp, dark_pixels=npix,
                                   dark_columns=ncol,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='SFLAT_500',
                        TEST_CATEGORY='EO')
        filename = '%s_superflat_dark_defects.png' % file_prefix
        results.extend(
            siteUtils.persist_png_files(filename, file_prefix,
                                        metadata=metadata))
    report_missing_data("validate_dark_defects", missing_det_names)
    return results
# NOTE(review): another, newer definition of validate_flat_pairs (using
# MAX_OBSERVED_SIGNAL etc.) also appears in this source; if both live in
# the same module, the later definition shadows this one — confirm which
# revision is intended.
def validate_flat_pairs(results, det_names):
    """Validate the flat pair analysis results.

    Appends flat_pairs_BOT schema entries (full well and max
    fractional deviation per amp), a file reference for the detector
    response file, and the linearity png files to `results`.
    Detectors with no detector-response file are reported via
    report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        det_resp_data = '%s_det_response.fits' % file_prefix
        if not os.path.isfile(det_resp_data):
            # No data for this detector; note it and continue.
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(det_resp_data, DETECTOR=det_name,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(det_resp_data))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        full_well_data = data['FULL_WELL']
        max_frac_dev_data = data['MAX_FRAC_DEV']
        for amp, full_well, max_frac_dev in zip(amps, full_well_data,
                                                max_frac_dev_data):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('flat_pairs_BOT'),
                                   amp=amp, full_well=full_well,
                                   max_frac_dev=max_frac_dev,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files(('%s_linearity*.png' % file_prefix),
                                        file_prefix, metadata=metadata))
    report_missing_data("validate_flat_pairs", missing_det_names)
    return results
def validate_raft_results(results, raft_names):
    """Validate the raft level results.

    For each installed raft, appends file references for every slot's
    eotest results file and persists any raft-level png files listed
    in <raft>_raft_results_task_png_files.txt.  Rafts with any slot
    missing its results file are reported via report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    slot_names = camera_info.get_slot_names()
    md = siteUtils.DataCatalogMetadata(ORIGIN=siteUtils.getSiteName(),
                                       TEST_CATEGORY='EO',
                                       DATA_PRODUCT='EOTEST_RESULTS')
    missing_raft_names = []
    for raft_name in raft_names:
        for slot_name in slot_names:
            # make_file_prefix is reused here simply to join the raft
            # and slot names into a detector name.
            det_name = make_file_prefix(raft_name, slot_name)
            file_prefix = make_file_prefix(run, det_name)
            results_file = '{}_eotest_results.fits'.format(file_prefix)
            if not os.path.isfile(results_file):
                if raft_name not in missing_raft_names:
                    missing_raft_names.append(raft_name)
                continue
            eotestUtils.addHeaderData(results_file, DETECTOR=det_name,
                                      DATE=eotestUtils.utc_now_isoformat(),
                                      RUNNUM=run)
            results.append(
                lcatr.schema.fileref.make(results_file,
                                          metadata=md(SLOT=slot_name,
                                                      RAFT=raft_name)))
        # Persist the png files.
        png_file_list = '{}_raft_results_task_png_files.txt'.format(raft_name)
        if not os.path.isfile(png_file_list):
            continue
        with open(png_file_list, 'r') as input_:
            png_files = [x.strip() for x in input_]
        # NOTE(review): det_name and file_prefix here carry the values
        # from the *last* slot iterated above — confirm that this is
        # intentional for the raft-level png metadata.
        metadata = dict(TEST_CATEGORY='EO', DETECTOR=det_name, RUN=run)
        results.extend(
            siteUtils.persist_png_files('', file_prefix,
                                        png_files=png_files,
                                        metadata=metadata))
    report_missing_data("validate_raft_results", missing_raft_names,
                        components='rafts', total=21)
    return results
def validate_persistence(results, det_names):
    """Validate the persistence analysis results.

    Appends a file reference for each detector's persistence pickle
    file and its persistence png to `results`.  Detectors with no
    pickle file are reported via report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    # Fix: the original body rebound `results = []` here, silently
    # discarding any entries the caller had accumulated.  The other
    # validators in this module append to the list passed in, so the
    # rebinding has been removed for consistency.
    missing_det_names = set()
    for det_name in det_names:
        file_prefix = make_file_prefix(run, det_name)
        data_file = f'{file_prefix}_persistence_data.pickle'
        if not os.path.isfile(data_file):
            missing_det_names.add(det_name)
            continue
        md = dict(DATA_PRODUCT='persistence_task_results', RUN=run,
                  DETECTOR=det_name)
        results.append(siteUtils.make_fileref(data_file, metadata=md))
        png_files = [f'{file_prefix}_persistence.png']
        md = dict(TEST_CATEGORY='EO', DETECTOR=det_name, RUN=run)
        results.extend(siteUtils.persist_png_files('', file_prefix,
                                                   png_files=png_files,
                                                   metadata=md))
    report_missing_data('validate_persistence', missing_det_names)
    return results
# NOTE(review): this chunk is the tail of a raft-level validator script;
# it begins mid-call (the trailing keyword arguments of what is
# presumably a siteUtils.DataCatalogMetadata(...) construction bound to
# `md`), and the original nesting was lost — confirm indentation against
# the full file.
                                   DATA_PRODUCT='EOTEST_RESULTS')
# Persist eotest_results files for each sensor.
raft_id = siteUtils.getUnitId()
raft = camera_components.Raft.create_from_etrav(raft_id)
for slot, sensor_id in raft.items():
    # Vendor code is the leading token of the sensor id, e.g. 'E2V-...'.
    ccd_vendor = sensor_id.split('-')[0].upper()
    results_file = '%s_eotest_results.fits' % sensor_id
    eotestUtils.addHeaderData(results_file, LSST_NUM=sensor_id,
                              DATE=eotestUtils.utc_now_isoformat(),
                              CCD_MANU=ccd_vendor, RUNNUM=run_number)
    results.append(
        lcatr.schema.fileref.make(results_file,
                                  metadata=md(CCD_MANU=ccd_vendor,
                                              LSST_NUM=sensor_id,
                                              SLOT=slot, LsstId=raft_id)))
# Persist the png files.
# NOTE(review): ccd_vendor below is left over from the last loop
# iteration — confirm this is intended for the raft-level metadata.
metadata = dict(CCD_MANU=ccd_vendor, TEST_CATEGORY='EO')
results.extend(
    siteUtils.persist_png_files('%s*.png' % raft_id, raft_id,
                                metadata=metadata))
results.extend(siteUtils.jobInfo())
lcatr.schema.write_file(results)
lcatr.schema.validate_file()
def validate_dark_current(results, det_names):
    """Validate and persist dark current results.

    Appends dark_current_BOT entries (95% CL and median dark current
    per amp) and, when the integration-time fit columns are present,
    dark_current_fit_BOT entries.  Also persists the noise png files.
    Detectors with no eotest results file are reported via
    report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        results_file = '%s_eotest_results.fits' % file_prefix
        if not os.path.isfile(results_file):
            # No data for this detector; note it and continue.
            missing_det_names.add(det_name)
            continue
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        dc95s = data['DARK_CURRENT_95']
        dark_current_medians = data['DARK_CURRENT_MEDIAN']
        for amp, dc95, dcmed in zip(amps, dc95s, dark_current_medians):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('dark_current_BOT'),
                                   amp=amp, dark_current_95CL=dc95,
                                   dark_current_median=dcmed,
                                   slot=slot, raft=raft))
        try:
            slopes = data['DARK_CURRENT_SLOPE']
            intercepts = data['DARK_CURRENT_INTERCEPT']
        except KeyError:
            # Dark current fit as a function of integration time was
            # not performed, so skip the dark_current_fit_BOT schema.
            pass
        else:
            for amp, slope, intercept in zip(amps, slopes, intercepts):
                results.append(
                    lcatr.schema.valid(
                        lcatr.schema.get('dark_current_fit_BOT'),
                        amp=amp, dark_current_slope=slope,
                        dark_current_intercept=intercept,
                        slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(TESTTYPE='DARK', TEST_CATEGORY='EO',
                        DETECTOR=det_name, RUN=run)
        pattern = '{}_noise.png'.format(file_prefix)
        results.extend(
            siteUtils.persist_png_files(pattern, file_prefix,
                                        metadata=metadata))
        pattern = '{}_total_noise_hists.png'.format(file_prefix)
        results.extend(
            siteUtils.persist_png_files(pattern, file_prefix,
                                        metadata=metadata))
    report_missing_data("validate_dark_current", missing_det_names)
    return results
def validate_flat_pairs(results, det_names):
    """Validate the flat pair analysis results.

    Appends flat_pairs_BOT schema entries (per amp) and detector
    response file references for each detector, persists the
    linearity / row-means-variance pngs, the raft-level imaging
    region correlation plots, and any photodiode corrections file
    named by the LCATR_PD_CORRECTIONS_FILE environment variable.
    Missing detectors and rafts are reported via report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        det_resp_data = '%s_det_response.fits' % file_prefix
        if not os.path.isfile(det_resp_data):
            # No data for this detector; note it and continue.
            missing_det_names.add(det_name)
            continue
        eotestUtils.addHeaderData(det_resp_data, DETECTOR=det_name,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(det_resp_data))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        max_observed_signal_data = data['MAX_OBSERVED_SIGNAL']
        max_frac_dev_data = data['MAX_FRAC_DEV']
        row_mean_var_slope_data = data['ROW_MEAN_VAR_SLOPE']
        linearity_turnoff_data = data['LINEARITY_TURNOFF']
        for amp, max_observed_signal, max_frac_dev, row_mean_var_slope, \
            linearity_turnoff in zip(amps, max_observed_signal_data,
                                     max_frac_dev_data,
                                     row_mean_var_slope_data,
                                     linearity_turnoff_data):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('flat_pairs_BOT'),
                                   amp=amp,
                                   max_observed_signal=max_observed_signal,
                                   max_frac_dev=max_frac_dev,
                                   row_mean_var_slope=row_mean_var_slope,
                                   linearity_turnoff=linearity_turnoff,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='FLAT',
                        TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files(('%s_linearity*.png' % file_prefix),
                                        file_prefix, metadata=metadata))
        results.extend(
            siteUtils.persist_png_files(
                ('%s_row_means_variance.png' % file_prefix),
                file_prefix, metadata=metadata))
    # Persist the raft-level imaging region correlation plots.
    missing_raft_names = set()
    for raft in camera_info.get_installed_raft_names():
        metadata = dict(TESTTYPE='FLAT', TEST_CATEGORY='EO', RAFT=raft,
                        RUN=run)
        file_prefix = make_file_prefix(run, raft)
        filename = f'{file_prefix}_imaging_region_correlations.png'
        if not os.path.isfile(filename):
            missing_raft_names.add(raft)
            continue
        results.extend(
            siteUtils.persist_png_files(filename, file_prefix,
                                        metadata=metadata))
    # Persist any pd correction file specified in the lcatr.cfg file.
    pd_corrections_file_env = 'LCATR_PD_CORRECTIONS_FILE'
    if pd_corrections_file_env in os.environ:
        pd_corrections_file = os.environ[pd_corrections_file_env]
        # Copy into the working dir so the fileref points at a local file.
        shutil.copy(pd_corrections_file, '.')
        fileref = siteUtils.make_fileref(os.path.basename(pd_corrections_file))
        results.append(fileref)
    report_missing_data("validate_flat_pairs", missing_det_names)
    report_missing_data("validate_flat_pairs",
                        sorted(list(missing_raft_names)),
                        components='rafts', total=21)
    return results
# NOTE(review): this chunk is the tail of a ptc_raft validator script.
# The enclosing per-sensor loop header (which binds slot, sensor_id,
# wgSlotName, ccd_vendor, ptc_results) is outside this view, and the
# original nesting was lost — confirm indentation against the full file.
results.append(siteUtils.make_fileref(ptc_results, folder=slot))
results_file = '%s_eotest_results.fits' % wgSlotName
data = sensorTest.EOTestResults(results_file)
amps = data['AMP']
ptc_gains = data['PTC_GAIN']
ptc_gain_errors = data['PTC_GAIN_ERROR']
for amp, gain, gain_error in zip(amps, ptc_gains, ptc_gain_errors):
    results.append(
        lcatr.schema.valid(lcatr.schema.get('ptc_raft'), amp=amp,
                           ptc_gain=gain, ptc_gain_error=gain_error,
                           slot=slot, sensor_id=wgSlotName))
# Persist the png files.
metadata = dict(CCD_MANU=ccd_vendor, LSST_NUM=sensor_id,
                TESTTYPE='FLAT', TEST_CATEGORY='EO')
results.extend(
    siteUtils.persist_png_files('%s*.png' % wgSlotName, sensor_id,
                                folder=slot, metadata=metadata))
results.extend(siteUtils.jobInfo())
lcatr.schema.write_file(results)
lcatr.schema.validate_file()
def persist_fe55_analysis_results():
    """Persist the results from the full analysis.

    Gathers the per-sensor Fe55 outputs (gain/eotest results, psf
    results, rolloff defects mask, median bias frame and pngs) for
    the current raft, appends fe55_raft_analysis schema entries per
    amplifier, and returns the list of results/filerefs.
    """
    raft_id = siteUtils.getUnitId()
    raft = camera_components.Raft.create_from_etrav(raft_id)
    results = []
    for slot, sensor_id in raft.items():
        # Vendor code is the leading token of the sensor id.
        ccd_vendor = sensor_id.split('-')[0].upper()
        # The output files from producer script.
        gain_file = '%(sensor_id)s_eotest_results.fits' % locals()
        psf_results = glob.glob('%(sensor_id)s_psf_results*.fits'
                                % locals())[0]
        rolloff_mask = '%(sensor_id)s_rolloff_defects_mask.fits' % locals()
        output_files = gain_file, psf_results, rolloff_mask
        # Add/update the metadata to the primary HDU of these files.
        for fitsfile in output_files:
            eotestUtils.addHeaderData(fitsfile, LSST_NUM=sensor_id,
                                      TESTTYPE='FE55',
                                      DATE=eotestUtils.utc_now_isoformat(),
                                      CCD_MANU=ccd_vendor)
        #
        # Persist the median bias FITS file.
        #
        bias_median_file = glob.glob(f'{sensor_id}_*_median_bias.fits')[0]
        results.append(siteUtils.make_fileref(bias_median_file, folder=slot))
        # Persist the png files.
        metadata = dict(CCD_MANU=ccd_vendor, LSST_NUM=sensor_id,
                        TESTTYPE='FE55', TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('%s*.png' % sensor_id, sensor_id,
                                        folder=slot, metadata=metadata))
        data = sensorTest.EOTestResults(gain_file)
        amps = data['AMP']
        gain_data = data['GAIN']
        gain_errors = data['GAIN_ERROR']
        sigmas = data['PSF_SIGMA']
        for amp, gain_value, gain_error, sigma in zip(amps, gain_data,
                                                      gain_errors, sigmas):
            # Sentinel value for non-finite (NaN/inf) gain errors,
            # which the schema cannot represent.
            if not np.isfinite(gain_error):
                gain_error = -1
            results.append(
                lcatr.schema.valid(lcatr.schema.get('fe55_raft_analysis'),
                                   amp=amp, gain=gain_value,
                                   gain_error=gain_error,
                                   psf_sigma=sigma,
                                   slot=slot, sensor_id=sensor_id))
        results.extend([lcatr.schema.fileref.make(x) for x in output_files])
    return results
# NOTE(review): this chunk is the tail of an Fe55 raft validator script;
# it begins mid-call (trailing keyword arguments of what is presumably
# an eotestUtils.addHeaderData(...) call inside a per-sensor loop), and
# the original nesting was lost — confirm indentation against the full
# file.
                          TESTTYPE='FE55',
                          DATE=eotestUtils.utc_now_isoformat(),
                          CCD_MANU=ccd_vendor)
data_products = [
    siteUtils.make_fileref(item, folder=slot) for item in files
]
results.extend(data_products)
# Persist the png files.
metadata = dict(CCD_MANU=ccd_vendor, LSST_NUM=sensor_id,
                TESTTYPE='FE55', TEST_CATEGORY='EO')
results.extend(
    siteUtils.persist_png_files('%s*.png' % sensor_id, sensor_id,
                                folder=slot, metadata=metadata))
# Persist the raft-level overscan correlation plot.
metadata = dict(LSST_NUM=raft_id, TESTTYPE='FE55', TEST_CATEGORY='EO')
results.extend(
    siteUtils.persist_png_files('%s*.png' % raft_id, raft_id,
                                metadata=metadata))
results.extend(siteUtils.jobInfo())
lcatr.schema.write_file(results)
lcatr.schema.validate_file()
# Persist eotest_results files for each sensor. raft_id = siteUtils.getUnitId() raft = camera_components.Raft.create_from_etrav(raft_id) for slot, sensor_id in raft.items(): ccd_vendor = sensor_id.split('-')[0].upper() if 'ccd2' in slot : continue wgSlotName = siteUtils.getWGSlotNames(raft)[sensor_id] results_file = '%s_eotest_results.fits' % wgSlotName eotestUtils.addHeaderData(results_file, LSST_NUM=sensor_id, DATE=eotestUtils.utc_now_isoformat(), CCD_MANU=ccd_vendor, RUNNUM=run_number) results.append(lcatr.schema.fileref.make(results_file, metadata=md(CCD_MANU=ccd_vendor, LSST_NUM=sensor_id, SLOT=slot, LsstId=raft_id))) # Persist the png files. metadata = dict(CCD_MANU=ccd_vendor, TEST_CATEGORY='EO') results.extend(siteUtils.persist_png_files('%s*.png' % raft_id, raft_id, metadata=metadata)) results.extend(siteUtils.jobInfo()) lcatr.schema.write_file(results) lcatr.schema.validate_file()
def validate_fe55(results, det_names):
    """Validate and persist fe55 gain and psf results.

    Appends fe55_BOT_analysis schema entries (gain, gain error, psf
    sigma per amp), file references for the psf results, rolloff
    mask and median bias frame, and the png files listed in the
    task's png-file manifest.  Detectors whose gain or psf results
    are absent are reported via report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        # The output files from producer script.
        gain_file = '%(file_prefix)s_eotest_results.fits' % locals()
        psf_results_files \
            = glob.glob('%(file_prefix)s_psf_results*.fits' % locals())
        if not os.path.isfile(gain_file) or not psf_results_files:
            # Results for this detector are not available so note
            # that and continue with the others.
            missing_det_names.append(det_name)
            continue
        psf_results = psf_results_files[0]
        rolloff_mask = '%(file_prefix)s_edge_rolloff_mask.fits' % locals()
        output_files = psf_results, rolloff_mask
        # Add/update the metadata to the primary HDU of these files.
        for fitsfile in output_files:
            eotestUtils.addHeaderData(fitsfile, TESTTYPE='FE55',
                                      DATE=eotestUtils.utc_now_isoformat())
        results.extend([lcatr.schema.fileref.make(x) for x in output_files])
        # Persist the median bias FITS file.
        bias_frame \
            = glob.glob('%(file_prefix)s_median_bias.fits' % locals())[0]
        results.append(siteUtils.make_fileref(bias_frame))
        # Persist the png files.
        # NOTE(review): the manifest is opened without an existence
        # check, unlike validate_raft_results — confirm the producer
        # always writes it when the gain/psf files exist.
        png_file_list = '{}_fe55_task_png_files.txt'.format(det_name)
        with open(png_file_list, 'r') as input_:
            png_files = [x.strip() for x in input_]
        metadata = dict(TESTTYPE='FE55', TEST_CATEGORY='EO',
                        DETECTOR=det_name, RUN=run)
        results.extend(
            siteUtils.persist_png_files('', file_prefix,
                                        png_files=png_files,
                                        metadata=metadata))
        data = sensorTest.EOTestResults(gain_file)
        amps = data['AMP']
        gain_data = data['GAIN']
        gain_errors = data['GAIN_ERROR']
        sigmas = data['PSF_SIGMA']
        for amp, gain_value, gain_error, sigma in zip(amps, gain_data,
                                                      gain_errors, sigmas):
            # Sentinel value for non-finite (NaN/inf) gain errors,
            # which the schema cannot represent.
            if not np.isfinite(gain_error):
                gain_error = -1
            results.append(
                lcatr.schema.valid(lcatr.schema.get('fe55_BOT_analysis'),
                                   amp=amp, gain=gain_value,
                                   gain_error=gain_error,
                                   psf_sigma=sigma,
                                   slot=slot, raft=raft))
    report_missing_data('validate_fe55', missing_det_names)
    return results
# NOTE(review): this chunk is the tail of a raft aliveness validator
# script; it begins mid-assignment (the left-hand side, presumably
# `channel_status, channel_signal`, is outside this view), and the
# original nesting was lost — confirm indentation against the full file.
    = aliveness_utils.raft_channel_statuses(fits_files)
for slot in channel_status:
    bad_channels = 0
    for amp, status in channel_status[slot].items():
        if channel_status[slot][amp] == 'bad':
            bad_channels += 1
            signal = channel_signal[slot][amp]
            channel = imutils.channelIds[amp]
            # row_template is %-formatted against the local namespace,
            # picking up slot/amp/channel/signal bindings.
            output.write(row_template % locals())
    results.append(lcatr.schema.valid(job_schema, exptime=exptime,
                                      slot=slot,
                                      bad_channels=bad_channels))
results.append(lcatr.schema.fileref.make(outfile))
# Persist the sequencer file that was used.
seq_file = glob.glob('*.seq')[0]
results.append(lcatr.schema.fileref.make(seq_file))
# Add png files.
raft_id = siteUtils.getUnitId()
md = siteUtils.DataCatalogMetadata(ORIGIN=siteUtils.getSiteName(),
                                   TEST_CATEGORY='EO')
results.extend(siteUtils.persist_png_files('*.png', raft_id, metadata=md))
results.extend(siteUtils.jobInfo())
lcatr.schema.write_file(results)
lcatr.schema.validate_file()
def validate_cte(results, det_names):
    """Validate the CTE task results.

    Appends cte_BOT schema entries (high/low flux serial and parallel
    CTI with errors, per amp), file references for the superflat
    files, and the png files listed in the task's png-file manifest.
    Detectors with no superflat files are reported via
    report_missing_data.

    Returns the updated `results` list.
    """
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        superflats \
            = sorted(glob.glob('{}_superflat_*.fits'.format(file_prefix)))
        if not superflats:
            # No data for this detector; note it and continue.
            missing_det_names.append(det_name)
            continue
        for item in superflats:
            eotestUtils.addHeaderData(item, FILENAME=item,
                                      DATE=eotestUtils.utc_now_isoformat())
        results.extend([siteUtils.make_fileref(x) for x in superflats])
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        cti_high_serial = data['CTI_HIGH_SERIAL']
        cti_high_serial_error = data['CTI_HIGH_SERIAL_ERROR']
        cti_high_parallel = data['CTI_HIGH_PARALLEL']
        cti_high_parallel_error = data['CTI_HIGH_PARALLEL_ERROR']
        cti_low_serial = data['CTI_LOW_SERIAL']
        cti_low_serial_error = data['CTI_LOW_SERIAL_ERROR']
        cti_low_parallel = data['CTI_LOW_PARALLEL']
        cti_low_parallel_error = data['CTI_LOW_PARALLEL_ERROR']
        for values in zip(amps,
                          cti_high_serial, cti_high_serial_error,
                          cti_high_parallel, cti_high_parallel_error,
                          cti_low_serial, cti_low_serial_error,
                          cti_low_parallel, cti_low_parallel_error):
            # values unpacks positionally: amp, then the eight CTI
            # measurements in the zip order above.
            results.append(
                lcatr.schema.valid(lcatr.schema.get('cte_BOT'),
                                   amp=values[0],
                                   cti_high_serial=values[1],
                                   cti_high_serial_error=values[2],
                                   cti_high_parallel=values[3],
                                   cti_high_parallel_error=values[4],
                                   cti_low_serial=values[5],
                                   cti_low_serial_error=values[6],
                                   cti_low_parallel=values[7],
                                   cti_low_parallel_error=values[8],
                                   slot=slot, raft=raft))
        # Persist the png files.
        png_file_list = '{}_cte_task_png_files.txt'.format(det_name)
        with open(png_file_list, 'r') as input_:
            png_files = [x.strip() for x in input_]
        metadata = dict(DETECTOR=det_name, RUN=run, TESTTYPE='SFLAT_500',
                        TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('', file_prefix,
                                        png_files=png_files,
                                        metadata=metadata))
    report_missing_data("validate_cte", missing_det_names)
    return results
# NOTE(review): this chunk is the tail of a cte_raft validator script;
# the enclosing per-sensor loop header (binding data, amps, slot,
# sensor_id, wgSlotName, ccd_vendor and the other cti_* columns) is
# outside this view, and the original nesting was lost — confirm
# indentation against the full file.
cti_low_parallel = data['CTI_LOW_PARALLEL']
cti_low_parallel_error = data['CTI_LOW_PARALLEL_ERROR']
for values in zip(amps,
                  cti_high_serial, cti_high_serial_error,
                  cti_high_parallel, cti_high_parallel_error,
                  cti_low_serial, cti_low_serial_error,
                  cti_low_parallel, cti_low_parallel_error):
    # values unpacks positionally: amp, then the eight CTI columns.
    results.append(lcatr.schema.valid(lcatr.schema.get('cte_raft'),
                                      amp=values[0],
                                      cti_high_serial=values[1],
                                      cti_high_serial_error=values[2],
                                      cti_high_parallel=values[3],
                                      cti_high_parallel_error=values[4],
                                      cti_low_serial=values[5],
                                      cti_low_serial_error=values[6],
                                      cti_low_parallel=values[7],
                                      cti_low_parallel_error=values[8],
                                      slot=slot, sensor_id=wgSlotName))
# Persist the png files.
metadata = dict(CCD_MANU=ccd_vendor, LSST_NUM=sensor_id,
                TESTTYPE='SFLAT_500', TEST_CATEGORY='EO')
results.extend(siteUtils.persist_png_files('%s*.png' % sensor_id,
                                           sensor_id, folder=slot,
                                           metadata=metadata))
results.extend(siteUtils.jobInfo())
lcatr.schema.write_file(results)
lcatr.schema.validate_file()
#!/usr/bin/env ipython """ Validator script for BOT raft-level results summaries. """ import glob import lcatr.schema import siteUtils from camera_components import camera_info from bot_eo_validators import validate_raft_results results = [] results = validate_raft_results(results, camera_info.get_installed_raft_names()) # # Validate focal plane heat map plots # unit_id = siteUtils.getUnitId() run = siteUtils.getRunNumber() png_files = glob.glob('{}_{}*.png'.format(unit_id, run)) results.extend(siteUtils.persist_png_files('', unit_id, png_files=png_files)) results.extend(siteUtils.jobInfo()) lcatr.schema.write_file(results) lcatr.schema.validate_file()