def _get_input_ad(basename, should_preprocess):
    input_fname = basename.replace('.fits', '_flatCorrected.fits')
    input_path = os.path.join(new_path_to_inputs, input_fname)
    cals = get_associated_calibrations(basename)

    if should_preprocess:
        # Reduce the raw file from scratch, building the master bias,
        # flat and arc from its associated calibrations.
        filename = cache_path(basename)
        ad = astrodata.open(filename)
        cals = [cache_path(c) for c in cals.filename.values]

        master_bias = reduce_bias(
            ad.data_label(),
            dataselect.select_data(cals, tags=['BIAS']))
        master_flat = reduce_flat(
            ad.data_label(),
            dataselect.select_data(cals, tags=['FLAT']),
            master_bias)
        master_arc = reduce_arc(
            ad.data_label(),
            dataselect.select_data(cals, tags=['ARC']))
        input_data = reduce_data(ad, master_arc, master_bias, master_flat)

    elif os.path.exists(input_path):
        # Fall back to a previously processed input file.
        input_data = astrodata.open(input_path)

    else:
        raise IOError(
            'Could not find input file:\n' +
            '    {:s}\n'.format(input_path) +
            '    Run pytest with "--force-preprocess-data" to get it')

    return input_data
def test_reduce_image_GN_EEV_2x2_g(path_to_inputs):
    logutils.config(file_name='gmos_test_reduce_image_GN_EEV_2x2_g.log')

    calib_files = []
    raw_subdir = 'GMOS/GN-2002A-Q-89'

    all_files = sorted(glob.glob(
        os.path.join(path_to_inputs, raw_subdir, '*.fits')))
    assert len(all_files) > 1

    list_of_bias = dataselect.select_data(all_files, ['BIAS'], [])

    list_of_flats = dataselect.select_data(
        all_files,
        ['IMAGE', 'FLAT'],
        [],
        dataselect.expr_parser('filter_name=="g"'))

    # These old data don't have an OBSCLASS keyword:
    list_of_science_files = dataselect.select_data(
        all_files, [],
        ['CAL'],
        dataselect.expr_parser(
            'object=="PerseusField4" and filter_name=="g"'))

    # Reduce the biases.
    reduce_bias = Reduce()
    assert len(reduce_bias.files) == 0

    reduce_bias.files.extend(list_of_bias)
    assert len(reduce_bias.files) == len(list_of_bias)

    reduce_bias.runr()
    calib_files.append(
        'processed_bias:{}'.format(reduce_bias.output_filenames[0]))

    # Reduce the g-band flats, using the processed bias.
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_flats)
    reduce_flats.ucals = normalize_ucals(reduce_flats.files, calib_files)
    reduce_flats.runr()
    calib_files.append(
        'processed_flat:{}'.format(reduce_flats.output_filenames[0]))

    # Reduce the science frames with the processed bias and flat.
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.runr()
def test_reduce_image_GN_HAM_2x2_z(path_to_inputs):
    objgraph = pytest.importorskip("objgraph")

    logutils.config(file_name='gmos_test_reduce_image_GN_HAM_2x2_z.log')

    calib_files = []
    raw_subdir = 'GMOS/GN-2017B-LP-15'

    all_files = sorted(glob.glob(
        os.path.join(path_to_inputs, raw_subdir, '*.fits')))
    assert len(all_files) > 1

    list_of_bias = dataselect.select_data(all_files, ['BIAS'], [])

    expr = dataselect.expr_parser('filter_name=="z"')
    list_of_z_flats = dataselect.select_data(all_files, ['TWILIGHT'], [], expr)

    expr = dataselect.expr_parser(
        'observation_class=="science" and filter_name=="z"')
    list_of_science = dataselect.select_data(all_files, [], ['CAL'], expr)

    def reduce(filelist, saveto=None, label='', calib_files=None,
               recipename=None):
        red = Reduce()
        assert len(red.files) == 0
        red.files.extend(filelist)
        assert len(red.files) == len(filelist)
        if calib_files:
            red.ucals = normalize_ucals(red.files, calib_files)
        if recipename:
            red.recipename = recipename
        red.runr()
        if saveto:
            calib_files.append(f'{saveto}:{red.output_filenames[0]}')

        # check that we are not leaking objects
        assert len(objgraph.by_type('NDAstroData')) == 0

    reduce(list_of_bias, saveto='processed_bias', label='bias',
           calib_files=calib_files)
    reduce(list_of_z_flats, saveto='processed_flat', label='flat',
           calib_files=calib_files)
    # If makeFringe is included in the science recipe, this can be omitted:
    reduce(list_of_science, saveto='processed_fringe', label='fringe',
           calib_files=calib_files, recipename='makeProcessedFringe')
    reduce(list_of_science, label='science', calib_files=calib_files)
def test_select_data(f2_data):
    answer = dataselect.select_data(
        f2_data,
        ["F2", "FLAT"],
        [],
        dataselect.expr_parser('filter_name=="Y"'))

    # For legibility, the expected answer lists only the file basenames.
    correct_files = {'S20131126S1111.fits', 'S20131126S1112.fits'}

    assert {os.path.basename(f) for f in answer} == correct_files
def test_descriptor_matches_type(descriptor, expected_type, gemini_files):
    gnirs_files = dataselect.select_data(gemini_files, tags=['GNIRS'])

    for _file in gnirs_files:
        ad = astrodata.open(_file)
        value = getattr(ad, descriptor)()

        assert isinstance(value, expected_type) or value is None, \
            "Assertion failed for file: {}".format(_file)
def __init__(self, path):
    log_dir = "./logs"

    dataset = sorted(
        glob.glob(os.path.join(path_to_inputs, path, '*.fits')))

    list_of_bias = dataselect.select_data(dataset, ['BIAS'], [])
    list_of_flats = dataselect.select_data(dataset, ['FLAT'], [])
    list_of_arcs = dataselect.select_data(dataset, ['ARC'], [])
    list_of_science = dataselect.select_data(dataset, [], ['CAL'])

    full_path = os.path.join(path_to_outputs, path)

    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(full_path, exist_ok=True)

    # Write a fresh calibration-manager configuration pointing at the
    # output directory, then configure the calibration service with it.
    config_file_name = os.path.join(full_path, "calibration_manager.cfg")

    if os.path.exists(config_file_name):
        os.remove(config_file_name)

    config_file_content = (
        "[calibs]\n"
        "standalone = False\n"
        "database_dir = {:s}\n".format(full_path)
    )

    with open(config_file_name, mode='w') as config_file:
        config_file.write(config_file_content)

    calibration_service = cal_service.CalibrationService()
    calibration_service.config(config_file=config_file_name)

    self.arcs = list_of_arcs
    self.biases = list_of_bias
    self.calibration_service = calibration_service
    self.flats = list_of_flats
    self.full_path = full_path
    self.log_dir = log_dir
    self.science = list_of_science
def test_reduce_image(path_to_inputs):
    calib_files = []

    all_files = glob.glob(
        os.path.join(path_to_inputs, 'GSAOI/test_reduce/', '*.fits'))
    all_files.sort()
    assert len(all_files) > 1

    list_of_darks = dataselect.select_data(all_files, ['DARK'], [])
    list_of_darks.sort()

    list_of_kshort_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="Kshort"'))
    list_of_kshort_flats.sort()

    list_of_h_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="H"'))
    list_of_h_flats.sort()

    list_of_science_files = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser(
            'observation_class=="science" and exposure_time==60.'))
    list_of_science_files.sort()

    # Reduce the darks.
    for darks in [list_of_darks]:
        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        logutils.config(file_name='gsaoi_test_reduce_dark.log', mode='quiet')
        reduce_darks.runr()

        del reduce_darks

    # Build a bad-pixel mask from the H-band flats and the darks.
    logutils.config(file_name='gsaoi_test_reduce_bpm.log', mode='quiet')
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(list_of_h_flats)
    reduce_bpm.files.extend(list_of_darks)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    del reduce_bpm

    # Reduce the Kshort flats, applying the user BPM.
    logutils.config(file_name='gsaoi_test_reduce_flats.log', mode='quiet')
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_kshort_flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    calib_files.append('processed_flat:{}'.format(
        reduce_flats.output_filenames[0]))

    del reduce_flats

    # Reduce the science frames, passing the user BPM and the processed flat.
    logutils.config(file_name='gsaoi_test_reduce_science.log', mode='quiet')
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.runr()

    del reduce_target
def test_reduce_image(change_working_dir):
    with change_working_dir():
        calib_files = []

        all_files = [download_from_archive(f) for f in datasets]
        all_files.sort()
        assert len(all_files) > 1

        darks_3s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==3'))
        darks_3s.sort()

        darks_20s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==20'))
        darks_20s.sort()

        darks_120s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==120'))
        darks_120s.sort()

        flats = dataselect.select_data(
            all_files, ['F2', 'FLAT', 'RAW'], [],
            dataselect.expr_parser('filter_name=="Y"'))
        flats.sort()

        science = dataselect.select_data(
            all_files, ['F2', 'RAW'], ['CAL'],
            dataselect.expr_parser('filter_name=="Y"'))
        science.sort()

        # Reduce the darks for each exposure time.
        for darks in [darks_3s, darks_20s, darks_120s]:
            reduce_darks = Reduce()
            assert len(reduce_darks.files) == 0

            reduce_darks.files.extend(darks)
            assert len(reduce_darks.files) == len(darks)

            logutils.config(file_name='f2_test_reduce_darks.log', mode='quiet')
            reduce_darks.runr()

            calib_files.append('processed_dark:{}'.format(
                reduce_darks.output_filenames[0]))

        # Build a bad-pixel mask from the flats and the short darks.
        logutils.config(file_name='f2_test_reduce_bpm.log', mode='quiet')
        reduce_bpm = Reduce()
        reduce_bpm.files.extend(flats)
        assert len(reduce_bpm.files) == len(flats)

        reduce_bpm.files.extend(darks_3s)
        assert len(reduce_bpm.files) == len(flats) + len(darks_3s)

        reduce_bpm.recipename = 'makeProcessedBPM'
        reduce_bpm.runr()

        bpm_filename = reduce_bpm.output_filenames[0]

        # Reduce the flats, applying the user BPM.
        logutils.config(file_name='f2_test_reduce_flats.log', mode='quiet')
        reduce_flats = Reduce()
        reduce_flats.files.extend(flats)
        reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
        reduce_flats.runr()

        calib_files.append('processed_flat:{}'.format(
            reduce_flats.output_filenames[0]))

        # Reduce the science frames, passing the user BPM and the
        # processed calibrations.
        logutils.config(file_name='f2_test_reduce_science.log', mode='quiet')
        reduce_target = Reduce()
        reduce_target.files.extend(science)
        reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
        reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
        reduce_target.runr()
def test_reduce_image(test_path, caldb):
    logutils.config(file_name='gsaoi_test_reduce_image.log')

    caldb.init(wipe=True)

    all_files = glob.glob(
        os.path.join(test_path, 'GSAOI/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    list_of_darks = dataselect.select_data(all_files, ['DARK'], [])

    list_of_kshort_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="Kshort"'))

    list_of_h_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="H"'))

    list_of_std_LHS_2026 = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser('object=="LHS 2026"'))

    list_of_std_cskd8 = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser('object=="cskd-8"'))

    list_of_science_files = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser(
            'observation_class=="science" and exposure_time==60.'))

    for darks in [list_of_darks]:
        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()
        caldb.add_cal(reduce_darks.output_filenames[0])

    reduce_bpm = Reduce()
    reduce_bpm.files.extend(list_of_h_flats)
    reduce_bpm.files.extend(list_of_darks)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_kshort_flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()
    caldb.add_cal(reduce_flats.output_filenames[0])

    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    for f in caldb.list_files():
        print(f)
def test_reduce_image(path_to_inputs):
    calib_files = []

    all_files = glob.glob(
        os.path.join(path_to_inputs, 'F2/test_reduce/', '*.fits'))
    all_files.sort()
    assert len(all_files) > 1

    darks_3s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==3'))
    darks_3s.sort()

    darks_20s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==20'))
    darks_20s.sort()

    darks_120s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==120'))
    darks_120s.sort()

    flats = dataselect.select_data(
        all_files, ['F2', 'FLAT', 'RAW'], [],
        dataselect.expr_parser('filter_name=="Y"'))
    flats.sort()

    science = dataselect.select_data(
        all_files, ['F2', 'RAW'], ['CAL'],
        dataselect.expr_parser('filter_name=="Y"'))
    science.sort()

    for darks in [darks_3s, darks_20s, darks_120s]:
        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        logutils.config(file_name='f2_test_reduce_darks.log', mode='quiet')
        reduce_darks.runr()

        calib_files.append('processed_dark:{}'.format(
            reduce_darks.output_filenames[0]))

    logutils.config(file_name='f2_test_reduce_bpm.log', mode='quiet')
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(flats)
    reduce_bpm.files.extend(darks_3s)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    logutils.config(file_name='f2_test_reduce_flats.log', mode='quiet')
    reduce_flats = Reduce()
    reduce_flats.files.extend(flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    calib_files.append('processed_flat:{}'.format(
        reduce_flats.output_filenames[0]))

    logutils.config(file_name='f2_test_reduce_science.log', mode='quiet')
    reduce_target = Reduce()
    reduce_target.files.extend(science)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.runr()
def test_reduce_image_GS_HAM_2x2_i_std(path_to_inputs):
    logutils.config(file_name='gmos_test_reduce_image_GS_HAM_2x2_i_std.log')

    calib_files = []
    raw_subdir = 'GMOS/GS-2017B-Q-6'

    all_files = sorted(glob.glob(
        os.path.join(path_to_inputs, raw_subdir, '*.fits')))
    assert len(all_files) > 1

    list_of_sci_bias = dataselect.select_data(
        all_files,
        ['BIAS'],
        [],
        dataselect.expr_parser('detector_x_bin==2 and detector_y_bin==2'))

    list_of_sci_flats = dataselect.select_data(
        all_files,
        ['TWILIGHT'],
        [],
        dataselect.expr_parser(
            'filter_name=="i" and detector_x_bin==2 and detector_y_bin==2'))

    list_of_science_files = dataselect.select_data(
        all_files, [],
        [],
        dataselect.expr_parser(
            'observation_class=="partnerCal" and filter_name=="i"'))

    reduce_bias = Reduce()
    assert len(reduce_bias.files) == 0

    reduce_bias.files.extend(list_of_sci_bias)
    assert len(reduce_bias.files) == len(list_of_sci_bias)

    reduce_bias.runr()
    calib_files.append(
        'processed_bias:{}'.format(reduce_bias.output_filenames[0]))

    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_sci_flats)
    # reduce_flats.uparms = [('addDQ:user_bpm', 'fixed_bpm_2x2_FullFrame.fits')]
    reduce_flats.ucals = normalize_ucals(reduce_flats.files, calib_files)
    reduce_flats.runr()
    calib_files.append(
        'processed_flat:{}'.format(reduce_flats.output_filenames[0]))

    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.uparms = [
        ('stackFrames:memory', 1),
        # ('addDQ:user_bpm', 'fixed_bpm_2x2_FullFrame.fits'),
        ('resampleToCommonFrame:interpolator', 'spline3')
    ]
    reduce_target.runr()
def test_reduce_image(test_path, caldb):
    logutils.config(file_name='f2_test_reduce_image.log')

    caldb.init(wipe=True)

    all_files = glob.glob(os.path.join(test_path, 'F2/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    darks_3s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==3'))

    darks_20s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==20'))

    darks_120s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==120'))

    flats = dataselect.select_data(
        all_files, ['F2', 'FLAT', 'RAW'], [],
        dataselect.expr_parser('filter_name=="Y"'))

    science = dataselect.select_data(
        all_files, ['F2', 'RAW'], ['CAL'],
        dataselect.expr_parser('filter_name=="Y"'))

    for darks in [darks_3s, darks_20s, darks_120s]:
        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()
        caldb.add_cal(reduce_darks.output_filenames[0])

    reduce_bpm = Reduce()
    reduce_bpm.files.extend(flats)
    reduce_bpm.files.extend(darks_3s)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    reduce_flats = Reduce()
    reduce_flats.files.extend(flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()
    caldb.add_cal(reduce_flats.output_filenames[0])

    reduce_target = Reduce()
    reduce_target.files.extend(science)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    for f in caldb.list_files():
        print(f)