def persist_traps_analysis():
    """Persist the results from the traps analysis."""
    raft_id = siteUtils.getUnitId()
    raft = camera_components.Raft.create_from_etrav(raft_id)
    results = []
    for slot, sensor_id in raft.items():
        ccd_vendor = sensor_id.split('-')[0].upper()
        trap_file = '%s_traps.fits' % sensor_id
        eotestUtils.addHeaderData(trap_file, LSST_NUM=sensor_id,
                                  TESTTYPE='TRAP',
                                  DATE=eotestUtils.utc_now_isoformat(),
                                  CCD_MANU=ccd_vendor)
        results.append(siteUtils.make_fileref(trap_file, folder=slot))
        mask_file = '%s_traps_mask.fits' % sensor_id
        results.append(siteUtils.make_fileref(mask_file, folder=slot))
        results_file = '%s_eotest_results.fits' % sensor_id
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        num_traps = data['NUM_TRAPS']
        for amp, ntrap in zip(amps, num_traps):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('traps_raft'),
                                   amp=amp, num_traps=ntrap,
                                   slot=slot, sensor_id=sensor_id))
    return results
def __init__(self, outfile='eotest_results.fits'):
    """
    Repackage per amp information in the json-formatted summary.lims
    files from each analysis task into the EOTestResults-formatted
    output.
    """
    self.eotest_results = sensorTest.EOTestResults(outfile)
def validate_traps(results, det_names):
    """Validate and persist trap results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        trap_file = '%s_traps.fits' % file_prefix
        if not os.path.isfile(trap_file):
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(trap_file, TESTTYPE='TRAP',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(trap_file))
        mask_file = '%s_traps_mask.fits' % file_prefix
        results.append(siteUtils.make_fileref(mask_file))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        num_traps = data['NUM_TRAPS']
        for amp, ntrap in zip(amps, num_traps):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('traps_BOT'),
                                   amp=amp, num_traps=ntrap,
                                   slot=slot, raft=raft))
    report_missing_data("validate_traps", missing_det_names)
    return results
def _get_amplifier_gains(file_pattern=None):
    """Extract the gains for each amp in an eotest_results file."""
    if (os.environ.get('LCATR_USE_UNIT_GAINS', 'False') == 'True'
            or file_pattern is None):
        print("_get_amplifier_gains: using unit gains")
        return {amp: 1 for amp in range(1, 17)}

    # Attempt to retrieve gains from fe55_analysis_BOT then ptc_BOT.
    # If neither are available, then use unit gains.
    print("_get_amplifier_gains: trying fe55_analysis_BOT")
    results_files = siteUtils.dependency_glob(file_pattern,
                                              jobname='fe55_analysis_BOT')
    if not results_files:
        print("_get_amplifier_gains: trying ptc_BOT")
        results_files = siteUtils.dependency_glob(file_pattern,
                                                  jobname='ptc_BOT')
    if not results_files:
        print("_get_amplifier_gains: both fe55 and ptc retrievals failed. "
              "using unit gains.")
        return {amp: 1 for amp in range(1, 17)}

    eotest_results_file = results_files[0]
    data = sensorTest.EOTestResults(eotest_results_file)
    amps = data['AMP']
    gains = data['GAIN']
    return dict(zip(amps, gains))
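# A minimal usage sketch of the gain-lookup fallback above, assuming
# _get_amplifier_gains is in scope; the results-file pattern is
# hypothetical.
import os

# Force unit gains, bypassing the fe55_analysis_BOT/ptc_BOT lookups.
os.environ['LCATR_USE_UNIT_GAINS'] = 'True'
unit_gains = _get_amplifier_gains('R22_S11_eotest_results.fits')

# With the override off, fe55_analysis_BOT results are tried first,
# then ptc_BOT, with unit gains as the final fallback.
os.environ['LCATR_USE_UNIT_GAINS'] = 'False'
gains = _get_amplifier_gains('R22_S11_eotest_results.fits')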
def write_nominal_gains(sensor_id, gain=1):
    """Write nominal gains in lieu of doing the Fe55 analysis."""
    import lsst.eotest.sensor as sensorTest
    results_file = '%s_eotest_results.fits' % sensor_id
    results = sensorTest.EOTestResults(results_file)
    for amp in range(1, 17):
        results.add_seg_result(amp, 'GAIN', gain)
    results.write(clobber=True)
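# Hypothetical calls of write_nominal_gains(); the sensor id is made up.
# Each call records the nominal GAIN value for amps 1-16 in
# <sensor_id>_eotest_results.fits in the current directory.
write_nominal_gains('ITL-3800C-0123')

# A non-unit nominal gain can be supplied instead.
write_nominal_gains('ITL-3800C-0123', gain=0.7)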
def __init__(self, outfile='eotest_results.fits', namps=16):
    """
    Constructor

    Parameters
    ----------
    outfile : str, optional
        Output filename of FITS file to contain the results as
        written by self.eotest_results.
    namps : int, optional
        Number of amplifiers per CCD.  Default: 16.
    """
    self.eotest_results = sensorTest.EOTestResults(outfile, namps=namps)
def validate_brighter_fatter(results, det_names):
    """Validate the brighter-fatter results."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        bf_results = '%s_bf.fits' % file_prefix
        if not os.path.isfile(bf_results):
            missing_det_names.add(det_name)
            continue
        eotestUtils.addHeaderData(bf_results, TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(bf_results))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        columns = (data['AMP'], data['BF_XCORR'], data['BF_XCORR_ERR'],
                   data['BF_YCORR'], data['BF_YCORR_ERR'],
                   data['BF_SLOPEX'], data['BF_SLOPEX_ERR'],
                   data['BF_SLOPEY'], data['BF_SLOPEY_ERR'],
                   data['BF_MEAN'])
        for amp, bf_xcorr, bf_xcorr_err, bf_ycorr, bf_ycorr_err, \
                bf_slopex, bf_slopex_err, bf_slopey, bf_slopey_err, \
                bf_mean in zip(*columns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('brighter_fatter_BOT'),
                                   amp=amp,
                                   bf_xcorr=bf_xcorr,
                                   bf_xcorr_err=bf_xcorr_err,
                                   bf_ycorr=bf_ycorr,
                                   bf_ycorr_err=bf_ycorr_err,
                                   bf_slopex=bf_slopex,
                                   bf_slopex_err=bf_slopex_err,
                                   bf_slopey=bf_slopey,
                                   bf_slopey_err=bf_slopey_err,
                                   bf_mean=bf_mean,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run,
                        TESTTYPE='FLAT', TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('%s*brighter-fatter.png' % file_prefix,
                                        file_prefix, metadata=metadata))
    # Report detectors with missing data, matching the other validators.
    report_missing_data("validate_brighter_fatter", missing_det_names)
    return results
def validate_read_noise(results, det_names):
    """Validate and persist read noise results."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        read_noise_file = '%s_eotest_results.fits' % file_prefix
        if not os.path.isfile(read_noise_file):
            # No data for this detector, so note that and continue
            # with the others.
            missing_det_names.add(det_name)
            continue
        data = sensorTest.EOTestResults(read_noise_file)
        amps = data['AMP']
        read_noise_data = data['READ_NOISE']
        system_noise_data = data['SYSTEM_NOISE']
        total_noise_data = data['TOTAL_NOISE']
        for amp, read_noise, system_noise, total_noise \
                in zip(amps, read_noise_data, system_noise_data,
                       total_noise_data):
            results.append(lcatr.schema.valid(
                lcatr.schema.get('read_noise_BOT'),
                amp=amp, read_noise=read_noise,
                system_noise=system_noise,
                total_noise=total_noise,
                slot=slot, raft=raft))
        files = glob.glob('%s_read_noise?*.fits' % file_prefix)
        for fitsfile in files:
            eotestUtils.addHeaderData(fitsfile, TESTTYPE='FE55',
                                      DATE=eotestUtils.utc_now_isoformat())
        data_products = [siteUtils.make_fileref(item) for item in files]
        results.extend(data_products)

        # Persist the png files.
        metadata = dict(DETECTOR=det_name, TESTTYPE='FE55',
                        TEST_CATEGORY='EO', RUN=run)
        filename = '%s_correlated_noise.png' % file_prefix
        results.extend(siteUtils.persist_png_files(filename, file_prefix,
                                                   metadata=metadata))

    # Persist the raft-level overscan correlation plots.
    for raft in camera_info.get_installed_raft_names():
        metadata = dict(TESTTYPE='FE55', TEST_CATEGORY='EO', RAFT=raft,
                        RUN=run)
        file_prefix = make_file_prefix(run, raft)
        filename = '%s_overscan_correlations.png' % file_prefix
        results.extend(siteUtils.persist_png_files(filename, file_prefix,
                                                   metadata=metadata))

    report_missing_data("validate_read_noise", missing_det_names)
    return results
def __init__(self, results_files):
    """
    Constructor.

    Parameters
    ----------
    results_files : dict
        Dictionary of eotest results filenames, keyed by slot name.
    """
    self.results = dict()
    self.sensor_ids = dict()
    for slot, filename in results_files.items():
        self.sensor_ids[slot] = filename.split('_')[0]
        self.results[slot] = sensorTest.EOTestResults(filename)
def validate_ptc(results, det_names):
    """Validate the PTC results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        ptc_results = '%s_ptc.fits' % file_prefix
        if not os.path.isfile(ptc_results):
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(ptc_results, TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(ptc_results))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        columns = (data['AMP'], data['PTC_GAIN'], data['PTC_GAIN_ERROR'],
                   data['PTC_A00'], data['PTC_A00_ERROR'],
                   data['PTC_NOISE'], data['PTC_NOISE_ERROR'],
                   data['PTC_TURNOFF'])
        for amp, gain, gain_error, a00, a00_error, \
                noise, noise_error, turnoff in zip(*columns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('ptc_BOT'),
                                   amp=amp, ptc_gain=gain,
                                   ptc_gain_error=gain_error,
                                   ptc_a00=a00, ptc_a00_error=a00_error,
                                   ptc_noise=noise,
                                   ptc_noise_error=noise_error,
                                   ptc_turnoff=turnoff,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run,
                        TESTTYPE='FLAT', TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('%s*ptcs.png' % file_prefix,
                                        file_prefix, metadata=metadata))
    report_missing_data("validate_ptc", missing_det_names)
    return results
def validate_flat_pairs(results, det_names):
    """Validate the flat pair analysis results."""
    run = siteUtils.getRunNumber()
    missing_det_names = set()
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        det_resp_data = '%s_det_response.fits' % file_prefix
        if not os.path.isfile(det_resp_data):
            missing_det_names.add(det_name)
            continue
        eotestUtils.addHeaderData(det_resp_data, DETECTOR=det_name,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(det_resp_data))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        max_observed_signal_data = data['MAX_OBSERVED_SIGNAL']
        max_frac_dev_data = data['MAX_FRAC_DEV']
        row_mean_var_slope_data = data['ROW_MEAN_VAR_SLOPE']
        linearity_turnoff_data = data['LINEARITY_TURNOFF']
        for amp, max_observed_signal, max_frac_dev, row_mean_var_slope, \
                linearity_turnoff in zip(amps, max_observed_signal_data,
                                         max_frac_dev_data,
                                         row_mean_var_slope_data,
                                         linearity_turnoff_data):
            results.append(lcatr.schema.valid(
                lcatr.schema.get('flat_pairs_BOT'),
                amp=amp, max_observed_signal=max_observed_signal,
                max_frac_dev=max_frac_dev,
                row_mean_var_slope=row_mean_var_slope,
                linearity_turnoff=linearity_turnoff,
                slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run,
                        TESTTYPE='FLAT', TEST_CATEGORY='EO')
        results.extend(siteUtils.persist_png_files(
            '%s_linearity*.png' % file_prefix, file_prefix,
            metadata=metadata))
        results.extend(siteUtils.persist_png_files(
            '%s_row_means_variance.png' % file_prefix, file_prefix,
            metadata=metadata))
    report_missing_data("validate_flat_pairs", missing_det_names)
    return results
def getSensorGains(jobname='fe55_analysis', sensor_id=None):
    """Return a dict of amplifier gains, keyed by amp number, read from
    the eotest results file produced by the specified job."""
    if sensor_id is None:
        sensor_id = siteUtils.getUnitId()
    try:
        gain_file = dependency_glob('%s_eotest_results.fits' % sensor_id,
                                    jobname=jobname)[0]
    except IndexError:
        raise RuntimeError('eotestUtils.getSensorGains: %s %s'
                           % (sensor_id, jobname))
    data = sensorTest.EOTestResults(gain_file)
    amps = data['AMP']
    gains = data['GAIN']
    sensorGains = dict([(amp, gains[amp-1]) for amp in amps])
    return sensorGains
def validate_dark_defects(results, det_names):
    """Validate and persist dark defects results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        mask_file = '%s_dark_pixel_mask.fits' % file_prefix
        if not os.path.isfile(mask_file):
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(mask_file, TESTTYPE='SFLAT_500',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(mask_file))
        superflat = '%s_median_sflat.fits' % file_prefix
        eotestUtils.addHeaderData(superflat,
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(superflat))
        eotest_results = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(eotest_results)
        amps = data['AMP']
        npixels = data['NUM_DARK_PIXELS']
        ncolumns = data['NUM_DARK_COLUMNS']
        for amp, npix, ncol in zip(amps, npixels, ncolumns):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('dark_defects_BOT'),
                                   amp=amp, dark_pixels=npix,
                                   dark_columns=ncol,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run,
                        TESTTYPE='SFLAT_500', TEST_CATEGORY='EO')
        filename = '%s_superflat_dark_defects.png' % file_prefix
        results.extend(
            siteUtils.persist_png_files(filename, file_prefix,
                                        metadata=metadata))
    report_missing_data("validate_dark_defects", missing_det_names)
    return results
def validate_flat_pairs(results, det_names):
    """Validate the flat pair analysis results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        det_resp_data = '%s_det_response.fits' % file_prefix
        if not os.path.isfile(det_resp_data):
            missing_det_names.append(det_name)
            continue
        eotestUtils.addHeaderData(det_resp_data, DETECTOR=det_name,
                                  TESTTYPE='FLAT',
                                  DATE=eotestUtils.utc_now_isoformat())
        results.append(siteUtils.make_fileref(det_resp_data))
        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        full_well_data = data['FULL_WELL']
        max_frac_dev_data = data['MAX_FRAC_DEV']
        for amp, full_well, max_frac_dev in zip(amps, full_well_data,
                                                max_frac_dev_data):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('flat_pairs_BOT'),
                                   amp=amp, full_well=full_well,
                                   max_frac_dev=max_frac_dev,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(DETECTOR=det_name, RUN=run,
                        TESTTYPE='FLAT', TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('%s_linearity*.png' % file_prefix,
                                        file_prefix, metadata=metadata))
    report_missing_data("validate_flat_pairs", missing_det_names)
    return results
def getSensorGains(jobname='fe55_analysis', sensor_id=None):
    """Return a dict of amplifier gains keyed by amp number.  Unit gains
    are returned if LCATR_USE_UNIT_GAINS or LCATR_SKIP_FE55_ANALYSIS is
    set to 'True'; otherwise the gains are read from the eotest results
    file produced by the specified job."""
    if (os.environ.get('LCATR_USE_UNIT_GAINS', 'False') == 'True'
            or os.environ.get("LCATR_SKIP_FE55_ANALYSIS", "False") == "True"):
        return {amp: 1 for amp in range(1, 17)}
    if sensor_id is None:
        sensor_id = siteUtils.getUnitId()
    try:
        gain_file = dependency_glob('%s_eotest_results.fits' % sensor_id,
                                    jobname=jobname)[0]
    except IndexError:
        raise RuntimeError('eotestUtils.getSensorGains: %s %s'
                           % (sensor_id, jobname))
    data = sensorTest.EOTestResults(gain_file)
    amps = data['AMP']
    gains = data['GAIN']
    sensorGains = dict([(amp, gains[amp - 1]) for amp in amps])
    return sensorGains
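# A minimal usage sketch of getSensorGains; the sensor id is hypothetical.
# Either environment variable short-circuits the eotest-results lookup and
# yields unit gains for amps 1-16.
import os

os.environ['LCATR_SKIP_FE55_ANALYSIS'] = 'True'
unit_gains = getSensorGains(sensor_id='ITL-3800C-0123')  # all amps -> 1

os.environ['LCATR_SKIP_FE55_ANALYSIS'] = 'False'
fe55_gains = getSensorGains(jobname='fe55_analysis',
                            sensor_id='ITL-3800C-0123')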
def validate_dark_current(results, det_names):
    """Validate and persist dark current results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        results_file = '%s_eotest_results.fits' % file_prefix
        if not os.path.isfile(results_file):
            missing_det_names.append(det_name)
            continue
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        dc95s = data['DARK_CURRENT_95']
        for amp, dc95 in zip(amps, dc95s):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('dark_current_BOT'),
                                   amp=amp, dark_current_95CL=dc95,
                                   slot=slot, raft=raft))
        # Persist the png files.
        metadata = dict(TESTTYPE='DARK', TEST_CATEGORY='EO',
                        DETECTOR=det_name, RUN=run)
        pattern = '{}_noise.png'.format(file_prefix)
        results.extend(
            siteUtils.persist_png_files(pattern, file_prefix,
                                        metadata=metadata))
        pattern = '{}_total_noise_hists.png'.format(file_prefix)
        results.extend(
            siteUtils.persist_png_files(pattern, file_prefix,
                                        metadata=metadata))
    report_missing_data("validate_dark_current", missing_det_names)
    return results
def gains(eotest_results_file, is_PTC=False):
    """
    Extract gains from the results file of some eo testing.

    Parameters
    ----------
    eotest_results_file : str
        Path to the file where EOTest gain results are stored.
    is_PTC : boolean, optional
        If True, the PTC gains are returned.  If False (default),
        the Fe55 gains are returned.
    """
    results = sensorTest.EOTestResults(eotest_results_file)
    if is_PTC:
        return {amp: gain for amp, gain in zip(results['AMP'],
                                               results['PTC_GAIN'])}
    else:
        return {amp: gain for amp, gain in zip(results['AMP'],
                                               results['GAIN'])}
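# Hypothetical calls of gains(); the results filename is made up.  With
# is_PTC=True the PTC_GAIN column is read, otherwise the Fe55 GAIN column.
fe55_gains = gains('R22_S11_eotest_results.fits')
ptc_gains = gains('R22_S11_eotest_results.fits', is_PTC=True)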
def __init__(self, results_files):
    """
    Constructor.

    Parameters
    ----------
    results_files : dict
        Dictionary of eotest results filenames, keyed by slot name.
    """
    self._raft_slots \
        = OrderedDict([(slot, i) for i, slot in
                       enumerate('S00 S01 S02 S10 S11 S12 '
                                 'S20 S21 S22'.split())])
    if 'S00' not in results_files.keys():
        # Corner-raft slot names.
        self._raft_slots \
            = OrderedDict([(slot, i) for i, slot in
                           enumerate('SW0 SW1 SG0 SG1'.split())])
    self.results = dict()
    self.sensor_ids = dict()
    for slot, filename in list(results_files.items()):
        self.sensor_ids[slot] = filename.split('_')[0]
        self.results[slot] = sensorTest.EOTestResults(filename)
def persist_fe55_analysis_results():
    """Persist the results from the full analysis."""
    raft_id = siteUtils.getUnitId()
    raft = camera_components.Raft.create_from_etrav(raft_id)
    results = []
    for slot, sensor_id in raft.items():
        ccd_vendor = sensor_id.split('-')[0].upper()
        # The output files from the producer script.
        gain_file = '%(sensor_id)s_eotest_results.fits' % locals()
        psf_results = glob.glob('%(sensor_id)s_psf_results*.fits'
                                % locals())[0]
        rolloff_mask = '%(sensor_id)s_rolloff_defects_mask.fits' % locals()

        output_files = gain_file, psf_results, rolloff_mask

        # Add/update the metadata to the primary HDU of these files.
        for fitsfile in output_files:
            eotestUtils.addHeaderData(fitsfile, LSST_NUM=sensor_id,
                                      TESTTYPE='FE55',
                                      DATE=eotestUtils.utc_now_isoformat(),
                                      CCD_MANU=ccd_vendor)

        #
        # Persist the median bias FITS file.
        #
        bias_median_file = glob.glob(f'{sensor_id}_*_median_bias.fits')[0]
        results.append(siteUtils.make_fileref(bias_median_file, folder=slot))

        # Persist the png files.
        metadata = dict(CCD_MANU=ccd_vendor, LSST_NUM=sensor_id,
                        TESTTYPE='FE55', TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('%s*.png' % sensor_id,
                                        sensor_id, folder=slot,
                                        metadata=metadata))

        data = sensorTest.EOTestResults(gain_file)
        amps = data['AMP']
        gain_data = data['GAIN']
        gain_errors = data['GAIN_ERROR']
        sigmas = data['PSF_SIGMA']
        for amp, gain_value, gain_error, sigma in zip(amps, gain_data,
                                                      gain_errors, sigmas):
            if not np.isfinite(gain_error):
                gain_error = -1
            results.append(
                lcatr.schema.valid(lcatr.schema.get('fe55_raft_analysis'),
                                   amp=amp, gain=gain_value,
                                   gain_error=gain_error,
                                   psf_sigma=sigma,
                                   slot=slot, sensor_id=sensor_id))
        results.extend([lcatr.schema.fileref.make(x) for x in output_files])
    return results
def raft_results_task(raft_name):
    """Task to aggregate data for raft-level plots and results."""
    import os
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    import lsst.eotest.sensor as sensorTest
    import lsst.eotest.raft as raftTest
    import siteUtils
    from camera_components import camera_info
    from bot_eo_analyses import get_raft_files_by_slot, make_file_prefix,\
        get_amplifier_gains, get_analysis_types

    def plt_savefig(filename):
        plt.savefig(filename)
        plt.close()

    # Get results files for each CCD in the raft.
    try:
        results_files \
            = get_raft_files_by_slot(raft_name, 'eotest_results.fits')
        print("results_files:", results_files)
    except FileNotFoundError:
        print("No raft-level results for", raft_name)
        results_files = {}

    # Determine the total number of pixels and number of edge rolloff
    # pixels for the types of CCDs in this raft and update the results
    # files.  This info will be used in computing the pixel defect
    # compliance.  Use one of the median bias files for this since they
    # should be available no matter which analysis tasks are run.
    bias_frames = get_raft_files_by_slot(raft_name, 'median_bias.fits',
                                         jobname='bias_frame_BOT')
    try:
        mask_files = get_raft_files_by_slot(raft_name,
                                            'edge_rolloff_mask.fits')
    except FileNotFoundError:
        input_mask = None
    else:
        input_mask = list(mask_files.values())[0]
    total_num, rolloff_mask \
        = sensorTest.pixel_counts(list(bias_frames.values())[0],
                                  input_mask=input_mask)

    # Exposure time (in seconds) for 95th percentile dark current shot
    # noise calculation.
    exptime = 15.

    # Update the eotest results files.
    analysis_types = get_analysis_types()
    for filename in results_files.values():
        eotest_results = sensorTest.EOTestResults(filename)
        eotest_results.add_ccd_result('TOTAL_NUM_PIXELS', total_num)
        eotest_results.add_ccd_result('ROLLOFF_MASK_PIXELS', rolloff_mask)
        shot_noise = eotest_results['DARK_CURRENT_95']*exptime
        total_noise = np.sqrt(eotest_results['READ_NOISE']**2 + shot_noise)
        add_max_frac_dev = ('MAX_FRAC_DEV' not in eotest_results.colnames
                            and 'linearity' in analysis_types)
        for i, amp in enumerate(eotest_results['AMP']):
            if add_max_frac_dev:
                eotest_results.add_seg_result(amp, 'MAX_FRAC_DEV', 0.)
            eotest_results.add_seg_result(amp, 'DC95_SHOT_NOISE',
                                          float(shot_noise[i]))
            try:
                eotest_results['TOTAL_NOISE'][i] = total_noise[i]
            except KeyError:
                eotest_results.add_seg_result(amp, 'TOTAL_NOISE',
                                              float(total_noise[i]))
        eotest_results.write(filename)

    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, raft_name)
    title = '{}, {}'.format(run, raft_name)

    gains = {slot_name: get_amplifier_gains(results_files[slot_name])
             for slot_name in results_files}

    # Update the gains in the results files with the retrieved values.
    for slot_name, ccd_gains in gains.items():
        try:
            results = sensorTest.EOTestResults(results_files[slot_name])
        except KeyError:
            continue
        else:
            for amp, gain in ccd_gains.items():
                results.add_seg_result(amp, 'GAIN', gain)
            results.write()

    # Extract dark currents for each amplifier in the raft.
    dark_currents = dict()
    for slot_name, results_file in results_files.items():
        results = sensorTest.EOTestResults(results_file)
        try:
            dark_currents[slot_name] \
                = dict(_ for _ in zip(results['AMP'],
                                      results['DARK_CURRENT_MEDIAN']))
        except KeyError:
            dark_currents[slot_name] = {amp: 0 for amp in range(1, 17)}

    png_files = []

    # Median bias mosaic
    median_bias = raftTest.make_raft_mosaic(bias_frames, bias_subtract=False)
    median_bias.plot(title='%s, median bias frames' % title,
                     annotation='ADU/pixel', rotate=180)
    png_files.append('{}_median_bias.png'.format(file_prefix))
    plt_savefig(png_files[-1])
    del median_bias

    # Dark mosaic
    dark_files = None
    try:
        dark_files = get_raft_files_by_slot(raft_name, 'median_dark_bp.fits')
    except FileNotFoundError:
        try:
            dark_files = get_raft_files_by_slot(raft_name,
                                                'median_dark_current.fits')
        except FileNotFoundError as eobj:
            print(eobj)
    if dark_files is not None:
        dark_mosaic = raftTest.make_raft_mosaic(dark_files, gains=gains,
                                                bias_frames=bias_frames)
        dark_mosaic.plot(title='{}, medianed dark frames'.format(title),
                         annotation='e-/pixel, gain-corrected, bias-subtracted',
                         rotate=180)
        png_files.append('{}_medianed_dark.png'.format(file_prefix))
        plt_savefig(png_files[-1])
        del dark_mosaic

    # High flux superflat mosaic.
    try:
        sflat_high_files \
            = get_raft_files_by_slot(raft_name, 'superflat_high.fits')
    except FileNotFoundError as eobj:
        print(eobj)
    else:
        sflat_high = raftTest.make_raft_mosaic(sflat_high_files, gains=gains,
                                               bias_frames=bias_frames,
                                               dark_currents=dark_currents)
        sflat_high.plot(title='%s, high flux superflat' % title,
                        annotation='e-/pixel, gain-corrected, bias-subtracted',
                        rotate=180)
        png_files.append('{}_superflat_high.png'.format(file_prefix))
        plt_savefig(png_files[-1])
        del sflat_high

    # Low flux superflat mosaic.
    try:
        sflat_low_files \
            = get_raft_files_by_slot(raft_name, 'superflat_low.fits')
    except FileNotFoundError as eobj:
        print(eobj)
    else:
        sflat_low = raftTest.make_raft_mosaic(sflat_low_files, gains=gains,
                                              bias_frames=bias_frames,
                                              dark_currents=dark_currents)
        sflat_low.plot(title='%s, low flux superflat' % title,
                       annotation='e-/pixel, gain-corrected, bias-subtracted',
                       rotate=180)
        png_files.append('{}_superflat_low.png'.format(file_prefix))
        plt_savefig(png_files[-1])
        del sflat_low

    # QE images at various wavelengths and filters
    acq_jobname = siteUtils.getProcessName('BOT_acq')
    for wl in ('SDSSu', 'SDSSg', 'SDSSr', 'SDSSi', 'SDSSz', 'SDSSY',
               '480nm', '650nm', '750nm', '870nm', '950nm', '970nm'):
        print("Processing %s image" % wl)
        pattern = 'lambda_flat_{}*/*_{}_*.fits'.format(wl, raft_name)
        print(pattern)
        print(acq_jobname)
        files = siteUtils.dependency_glob(pattern, acq_jobname=acq_jobname)
        if not files:
            print("no files found")
            continue
        lambda_files = dict()
        for item in files:
            slot_name = os.path.basename(item).split('_')[-1].split('.')[0]
            lambda_files[slot_name] = item
        flat = raftTest.make_raft_mosaic(lambda_files, gains=gains,
                                         bias_frames=bias_frames,
                                         dark_currents=dark_currents)
        flat.plot(title='%s, %s' % (title, wl),
                  annotation='e-/pixel, gain-corrected, bias-subtracted',
                  rotate=180)
        png_files.append('{}_{}_flat.png'.format(file_prefix, wl))
        plt_savefig(png_files[-1])
        del flat

    # TODO: QE summary plot

    # Plots of read noise, nonlinearity, serial and parallel CTI,
    # PSF size, and gains from Fe55 and PTC.
    spec_plots = raftTest.RaftSpecPlots(results_files)

    columns = 'READ_NOISE DC95_SHOT_NOISE TOTAL_NOISE'.split()
    try:
        spec_plots.make_multi_column_plot(columns,
                                          'noise per pixel (e- rms)',
                                          spec=9, title=title,
                                          ybounds=(-1, 100))
        png_files.append('%s_total_noise.png' % file_prefix)
        plt_savefig(png_files[-1])
    except KeyError:
        pass

    try:
        if 'linearity' in analysis_types:
            spec_plots.make_plot('MAX_FRAC_DEV',
                                 'non-linearity (max. fractional deviation)',
                                 spec=0.03, title=title, ybounds=(0, 0.1))
            png_files.append('%s_linearity.png' % file_prefix)
            plt_savefig(png_files[-1])
    except KeyError:
        pass

    try:
        spec_plots.make_multi_column_plot(
            ('CTI_LOW_SERIAL', 'CTI_HIGH_SERIAL'), 'Serial CTI (ppm)',
            spec=(5e-6, 3e-5), title=title, yscaling=1e6, yerrors=True,
            colors='br', ybounds=(-1e-5, 6e-5))
        png_files.append('%s_serial_cti.png' % file_prefix)
        plt_savefig(png_files[-1])
    except KeyError:
        pass

    try:
        spec_plots.make_multi_column_plot(
            ('CTI_LOW_PARALLEL', 'CTI_HIGH_PARALLEL'), 'Parallel CTI (ppm)',
            spec=3e-6, title=title, yscaling=1e6, yerrors=True,
            colors='br', ybounds=(-1e-5, 6e-5))
        png_files.append('%s_parallel_cti.png' % file_prefix)
        plt_savefig(png_files[-1])
    except KeyError:
        pass

    try:
        spec_plots.make_plot('PSF_SIGMA', 'PSF sigma (microns)',
                             spec=5., title=title, ybounds=(0, 5.2))
        png_files.append('%s_psf_sigma.png' % file_prefix)
        plt_savefig(png_files[-1])
    except KeyError:
        # PSF_SIGMA not available, so skip this plot.
        pass

    try:
        spec_plots.make_multi_column_plot(('GAIN', 'PTC_GAIN'),
                                          'System Gain (e-/ADU)',
                                          yerrors=True, title=title,
                                          colors='br', ybounds=(0, 3))
        png_files.append('%s_system_gain.png' % file_prefix)
        plt_savefig(png_files[-1])
    except KeyError:
        # PTC_GAIN data not available, so skip this plot.
        pass

    try:
        if 'dark' in analysis_types:
            spec_plots.make_plot('DARK_CURRENT_95',
                                 '95th percentile dark current (e-/pixel/s)',
                                 spec=0.2, title=title, ybounds=(-0.01, 1))
            png_files.append('%s_dark_current.png' % file_prefix)
            plt_savefig(png_files[-1])
    except KeyError:
        pass

    # Make bias frame stats time history plots for the current raft.
    pattern = f'{raft_name}_{run}_bias_frame_stats.pickle'
    try:
        stats_file = siteUtils.dependency_glob(pattern,
                                               jobname='bias_frame_BOT')[0]
    except IndexError:
        pass
    else:
        file_prefix = make_file_prefix(run, raft_name)
        df_raft = pd.read_pickle(stats_file)
        if raft_name in 'R00 R04 R40 R44':
            slots = 'SG0 SW1 SW0 SG1'.split()
        else:
            slots = 'S20 S21 S22 S10 S11 S12 S00 S01 S02'.split()
        t0 = int(np.min(df_raft['MJD']))

        fig = plt.figure(figsize=(12, 12))
        for i, slot in enumerate(slots, 1):
            fig.add_subplot(3, 3, i)
            df = df_raft.query(f'slot == "{slot}"')
            amps = sorted(list(set(df['amp'])))
            for amp in amps:
                my_df = df.query(f'amp == {amp}')
                plt.scatter(my_df['MJD'] - t0, my_df['mean'], s=2,
                            label=f'{amp}')
            xmin, xmax, _, _ = plt.axis()
            plt.xlim(xmin, 1.2 * (xmax - xmin) + xmin)
            plt.legend(fontsize='x-small')
            plt.xlabel(f'MJD - {t0}')
            plt.ylabel('mean signal (ADU)')
            plt.title(slot)
        plt.tight_layout(rect=(0, 0, 1, 0.95))
        plt.suptitle(f'{file_prefix}, bias stability, mean signal')
        png_file = f'{file_prefix}_bias_stability_mean.png'
        png_files.append(png_file)
        plt_savefig(png_file)

        fig = plt.figure(figsize=(12, 12))
        for i, slot in enumerate(slots, 1):
            fig.add_subplot(3, 3, i)
            df = df_raft.query(f'slot == "{slot}"')
            amps = sorted(list(set(df['amp'])))
            for amp in amps:
                my_df = df.query(f'amp == {amp}')
                plt.scatter(my_df['MJD'] - t0, my_df['stdev'], s=2,
                            label=f'{amp}')
            xmin, xmax, _, _ = plt.axis()
            plt.xlim(xmin, 1.2 * (xmax - xmin) + xmin)
            plt.legend(fontsize='x-small')
            plt.xlabel(f'MJD - {t0}')
            plt.ylabel('stdev (ADU)')
            plt.title(slot)
        plt.tight_layout(rect=(0, 0, 1, 0.95))
        plt.suptitle(f'{file_prefix}, bias stability, stdev')
        png_file = f'{file_prefix}_bias_stability_stdev.png'
        png_files.append(png_file)
        plt_savefig(png_file)

    png_file_list = '{}_raft_results_task_png_files.txt'.format(raft_name)
    with open(png_file_list, 'w') as output:
        for item in png_files:
            if os.path.isfile(item):
                output.write('{}\n'.format(item))

    return None
                                 LSST_NUM=sensor_id, producer='SR-EOT-1',
                                 TESTTYPE='FE55', TEST_CATEGORY='EO')
#
# Persist various png files.
#
png_files = glob.glob('%(sensor_id)s_fe55*.png' % locals())
png_filerefs = []
for png_file in png_files:
    dp = eotestUtils.png_data_product(png_file, sensor_id)
    png_filerefs.append(
        lcatr.schema.fileref.make(png_file,
                                  metadata=md(DATA_PRODUCT=dp)))
results.extend(png_filerefs)

data = sensorTest.EOTestResults(gain_file)
amps = data['AMP']
gain_data = data['GAIN']
gain_errors = data['GAIN_ERROR']
sigmas = data['PSF_SIGMA']
for amp, gain_value, gain_error, sigma in zip(amps, gain_data,
                                              gain_errors, sigmas):
    results.append(
        lcatr.schema.valid(lcatr.schema.get('fe55_analysis'),
                           amp=amp, gain=gain_value,
                           gain_error=gain_error, psf_sigma=sigma))
results.extend(siteUtils.jobInfo())
def gains(eotest_results_file):
    """
    Extract Fe55 gains from the results file of some eo testing.
    """
    results = sensorTest.EOTestResults(eotest_results_file)
    return {amp: gain for amp, gain in zip(results['AMP'],
                                           results['GAIN'])}
raft_id = siteUtils.getUnitId()
raft = camera_components.Raft.create_from_etrav(raft_id)

results = []
for slot, sensor_id in raft.items():
    print("Processing:", slot, sensor_id)
    ccd_vendor = sensor_id.split('-')[0].upper()

    superflats = glob.glob('%(sensor_id)s_superflat_*.fits' % locals())
    for item in superflats:
        eotestUtils.addHeaderData(item, FILENAME=item,
                                  DATE=eotestUtils.utc_now_isoformat())
    results.extend([siteUtils.make_fileref(x, folder=slot)
                    for x in superflats])

    results_file = '%s_eotest_results.fits' % sensor_id
    data = sensorTest.EOTestResults(results_file)
    amps = data['AMP']
    cti_high_serial = data['CTI_HIGH_SERIAL']
    cti_high_serial_error = data['CTI_HIGH_SERIAL_ERROR']
    cti_high_parallel = data['CTI_HIGH_PARALLEL']
    cti_high_parallel_error = data['CTI_HIGH_PARALLEL_ERROR']
    cti_low_serial = data['CTI_LOW_SERIAL']
    cti_low_serial_error = data['CTI_LOW_SERIAL_ERROR']
    cti_low_parallel = data['CTI_LOW_PARALLEL']
    cti_low_parallel_error = data['CTI_LOW_PARALLEL_ERROR']

    for values in zip(amps, cti_high_serial, cti_high_serial_error,
                      cti_high_parallel, cti_high_parallel_error,
    # to read noise to produce updated total noise.
    shot_noise = repackager.eotest_results['DARK_CURRENT_95']*exptime
    total_noise = np.sqrt(repackager.eotest_results['READ_NOISE']**2
                          + shot_noise)
    for i, amp in enumerate(repackager.eotest_results['AMP']):
        repackager.eotest_results.add_seg_result(amp, 'DC95_SHOT_NOISE',
                                                 float(shot_noise[i]))
        repackager.eotest_results['TOTAL_NOISE'][i] = total_noise[i]

    outfile = '%s_eotest_results.fits' % sensor_id
    repackager.write(outfile)
    results_files[slot] = outfile

gains = dict()
for slot, res_file in results_files.items():
    results = sensorTest.EOTestResults(res_file)
    gains[slot] = dict([(amp, gain) for amp, gain
                        in zip(results['AMP'], results['GAIN'])])

# Collect super bias files for bias frame subtraction.
bias_frames = slot_dependency_glob('*median_bias.fits',
                                   'dark_defects_raft')

title = '%s, %s' % (raft_id, run_number)
file_prefix = '%s_%s' % (raft_id, run_number)

# Raft-level mosaics of median darks, bias, superflats high and low.
dark_mosaic = raftTest.RaftMosaic(
    slot_dependency_glob('*median_dark_bp.fits', 'bright_defects_raft'),
    gains=gains, bias_frames=bias_frames)
dark_mosaic.plot(title='%s, medianed dark frames' % title,
                 annotation='e-/pixel, gain-corrected, bias-subtracted',
def validate_fe55(results, det_names):
    """Validate and persist fe55 gain and psf results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)

        # The output files from the producer script.
        gain_file = '%(file_prefix)s_eotest_results.fits' % locals()
        psf_results_files \
            = glob.glob('%(file_prefix)s_psf_results*.fits' % locals())

        if not os.path.isfile(gain_file) or not psf_results_files:
            # Results for this detector are not available, so note
            # that and continue with the others.
            missing_det_names.append(det_name)
            continue
        psf_results = psf_results_files[0]

        rolloff_mask = '%(file_prefix)s_edge_rolloff_mask.fits' % locals()

        output_files = psf_results, rolloff_mask

        # Add/update the metadata to the primary HDU of these files.
        for fitsfile in output_files:
            eotestUtils.addHeaderData(fitsfile, TESTTYPE='FE55',
                                      DATE=eotestUtils.utc_now_isoformat())

        results.extend([lcatr.schema.fileref.make(x) for x in output_files])

        # Persist the median bias FITS file.
        bias_frame \
            = glob.glob('%(file_prefix)s_median_bias.fits' % locals())[0]
        results.append(siteUtils.make_fileref(bias_frame))

        # Persist the png files.
        png_file_list = '{}_fe55_task_png_files.txt'.format(det_name)
        with open(png_file_list, 'r') as input_:
            png_files = [x.strip() for x in input_]
        metadata = dict(TESTTYPE='FE55', TEST_CATEGORY='EO',
                        DETECTOR=det_name, RUN=run)
        results.extend(
            siteUtils.persist_png_files('', file_prefix,
                                        png_files=png_files,
                                        metadata=metadata))

        data = sensorTest.EOTestResults(gain_file)
        amps = data['AMP']
        gain_data = data['GAIN']
        gain_errors = data['GAIN_ERROR']
        sigmas = data['PSF_SIGMA']
        for amp, gain_value, gain_error, sigma in zip(amps, gain_data,
                                                      gain_errors, sigmas):
            if not np.isfinite(gain_error):
                gain_error = -1
            results.append(
                lcatr.schema.valid(lcatr.schema.get('fe55_BOT_analysis'),
                                   amp=amp, gain=gain_value,
                                   gain_error=gain_error,
                                   psf_sigma=sigma,
                                   slot=slot, raft=raft))
    report_missing_data('validate_fe55', missing_det_names)
    return results
sensor_id = siteUtils.getUnitId()

mask_file = '%s_dark_pixel_mask.fits' % sensor_id
eotestUtils.addHeaderData(mask_file, LSST_NUM=sensor_id,
                          TESTTYPE='SFLAT_500',
                          DATE=eotestUtils.utc_now_isoformat(),
                          CCD_MANU=siteUtils.getCcdVendor().upper())
results = [lcatr.schema.fileref.make(mask_file)]

superflat = '%s_median_sflat.fits' % sensor_id
eotestUtils.addHeaderData(superflat, DATE=eotestUtils.utc_now_isoformat())
results.append(lcatr.schema.fileref.make(superflat))

eotest_results = '%s_eotest_results.fits' % sensor_id
data = sensorTest.EOTestResults(eotest_results)
amps = data['AMP']
npixels = data['NUM_DARK_PIXELS']
ncolumns = data['NUM_DARK_COLUMNS']

for amp, npix, ncol in zip(amps, npixels, ncolumns):
    results.append(
        lcatr.schema.valid(lcatr.schema.get('dark_defects'),
                           amp=amp, dark_pixels=npix, dark_columns=ncol))

results.extend(siteUtils.jobInfo())

lcatr.schema.write_file(results)
lcatr.schema.validate_file()
#!/usr/bin/env python
import glob
import lsst.eotest.sensor as sensorTest
import lcatr.schema
import siteUtils
import eotestUtils

sensor_id = siteUtils.getUnitId()

results = []

read_noise_file = '%s_eotest_results.fits' % sensor_id
data = sensorTest.EOTestResults(read_noise_file)
amps = data['AMP']
read_noise_data = data['READ_NOISE']
system_noise_data = data['SYSTEM_NOISE']
total_noise_data = data['TOTAL_NOISE']
for amp, read_noise, system_noise, total_noise in zip(amps, read_noise_data,
                                                      system_noise_data,
                                                      total_noise_data):
    results.append(
        lcatr.schema.valid(lcatr.schema.get('read_noise'),
                           amp=amp, read_noise=read_noise,
                           system_noise=system_noise,
                           total_noise=total_noise))

results.extend(siteUtils.jobInfo())
results.append(eotestUtils.eotestCalibrations())
def validate_cte(results, det_names):
    """Validate the CTE task results."""
    run = siteUtils.getRunNumber()
    missing_det_names = []
    for det_name in det_names:
        raft, slot = det_name.split('_')
        file_prefix = make_file_prefix(run, det_name)
        superflats \
            = sorted(glob.glob('{}_superflat_*.fits'.format(file_prefix)))
        if not superflats:
            missing_det_names.append(det_name)
            continue
        for item in superflats:
            eotestUtils.addHeaderData(item, FILENAME=item,
                                      DATE=eotestUtils.utc_now_isoformat())
        results.extend([siteUtils.make_fileref(x) for x in superflats])

        results_file = '%s_eotest_results.fits' % file_prefix
        data = sensorTest.EOTestResults(results_file)
        amps = data['AMP']
        cti_high_serial = data['CTI_HIGH_SERIAL']
        cti_high_serial_error = data['CTI_HIGH_SERIAL_ERROR']
        cti_high_parallel = data['CTI_HIGH_PARALLEL']
        cti_high_parallel_error = data['CTI_HIGH_PARALLEL_ERROR']
        cti_low_serial = data['CTI_LOW_SERIAL']
        cti_low_serial_error = data['CTI_LOW_SERIAL_ERROR']
        cti_low_parallel = data['CTI_LOW_PARALLEL']
        cti_low_parallel_error = data['CTI_LOW_PARALLEL_ERROR']

        for values in zip(amps,
                          cti_high_serial, cti_high_serial_error,
                          cti_high_parallel, cti_high_parallel_error,
                          cti_low_serial, cti_low_serial_error,
                          cti_low_parallel, cti_low_parallel_error):
            results.append(
                lcatr.schema.valid(lcatr.schema.get('cte_BOT'),
                                   amp=values[0],
                                   cti_high_serial=values[1],
                                   cti_high_serial_error=values[2],
                                   cti_high_parallel=values[3],
                                   cti_high_parallel_error=values[4],
                                   cti_low_serial=values[5],
                                   cti_low_serial_error=values[6],
                                   cti_low_parallel=values[7],
                                   cti_low_parallel_error=values[8],
                                   slot=slot, raft=raft))

        # Persist the png files.
        png_file_list = '{}_cte_task_png_files.txt'.format(det_name)
        with open(png_file_list, 'r') as input_:
            png_files = [x.strip() for x in input_]
        metadata = dict(DETECTOR=det_name, RUN=run,
                        TESTTYPE='SFLAT_500', TEST_CATEGORY='EO')
        results.extend(
            siteUtils.persist_png_files('', file_prefix,
                                        png_files=png_files,
                                        metadata=metadata))
    report_missing_data("validate_cte", missing_det_names)
    return results
                    default='cosmic_ray_catalog.fits', type=str,
                    help='Output file name for CR catalog')
parser.add_argument('--medianed_dark', default=None, type=str,
                    help='Filename of medianed dark.')
parser.add_argument('--nsig', default=7, type=float,
                    help='Number of sigma threshold for detecting CRs')
args = parser.parse_args()

darks = glob.glob(args.dark_frame_pattern)

eo_results = sensorTest.EOTestResults(args.eotest_results)
gains = {amp: gain for amp, gain in zip(eo_results['AMP'],
                                        eo_results['GAIN'])}

med_file = args.medianed_dark
if med_file is None:
    med_file = tempfile.NamedTemporaryFile(prefix='tmp_med_file_', dir='.',
                                           suffix='.fits').name
    imutils.fits_median_file(darks, med_file, bitpix=-32)

mask_file = tempfile.NamedTemporaryFile(prefix='tmp_mask_', dir='.',
                                        suffix='.fits').name
                                 f'sflat_flat_*_H_*/*{det_name}.fits')
flat1_files = fp_server.get_files('BOT_acq',
                                  f'flat_*_flat1_*/*{det_name}.fits')
lambda_files = fp_server.get_files('BOT_acq',
                                   f'lambda_flat_*/*{det_name}.fits')

# Make a medianed bias frame to use in the various tasks.
bias_frame = bot_eo.make_bias_filename(Run, det_name)
bot_eo.bias_frame_task(Run, det_name, bias_files, bias_frame=bias_frame)

bot_eo.fe55_task(Fe55Run, det_name, fe55_files, bias_frame=bias_frame)
plt.close('all')   # This is needed to recover memory from matplotlib.

# Get the Fe55 gains from the fe55_results_file.  Note that this
# job generates the edge rolloff mask.
fe55_results = sensorTest.EOTestResults(fe55_results_file)
gains = dict(zip(fe55_results['AMP'], fe55_results['GAIN']))
print(gains)

# Do the read noise analysis, but only consider the first 5 bias frame
# files.  At least two are needed for this task.
bot_eo.read_noise_task(Run, det_name, bias_files[:5], gains)
plt.close('all')

bot_eo.raft_noise_correlations(Run, raft, bias_file_dict)
plt.close('all')

# Get the edge rolloff mask.
mask_files = sorted(glob.glob('_'.join((det_name, Run, '*mask.fits'))))

# The two defects tasks generate mask files, so the mask files