Exemplo n.º 1
0
def test_reduce_image_GN_EEV_2x2_g(path_to_inputs):
    """Reduce a GMOS-N EEV 2x2 g-band imaging dataset end to end.

    Builds a processed bias and a processed flat from the raw files of
    program GN-2002A-Q-89, then reduces the science frames with both
    products passed in as user calibrations.
    """
    logutils.config(file_name='gmos_test_reduce_image_GN_EEV_2x2_g.log')

    # Accumulated as 'caltype:filename' strings for normalize_ucals().
    calib_files = []

    raw_subdir = 'GMOS/GN-2002A-Q-89'

    all_files = sorted(glob.glob(
        os.path.join(path_to_inputs, raw_subdir, '*.fits')))
    assert len(all_files) > 1

    list_of_bias = dataselect.select_data(
        all_files,
        ['BIAS'],
        []
    )

    list_of_flats = dataselect.select_data(
        all_files,
        ['IMAGE', 'FLAT'],
        [],
        dataselect.expr_parser('filter_name=="g"')
    )

    # These old data don't have an OBSCLASS keyword:
    list_of_science_files = dataselect.select_data(
        all_files, [],
        ['CAL'],
        dataselect.expr_parser(
            'object=="PerseusField4" and filter_name=="g"'
        )
    )

    # --- Bias reduction ---
    reduce_bias = Reduce()
    assert len(reduce_bias.files) == 0

    reduce_bias.files.extend(list_of_bias)
    assert len(reduce_bias.files) == len(list_of_bias)

    reduce_bias.runr()

    calib_files.append(
        'processed_bias:{}'.format(reduce_bias.output_filenames[0])
    )

    # --- Flat reduction, using the processed bias ---
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_flats)
    reduce_flats.ucals = normalize_ucals(reduce_flats.files, calib_files)
    reduce_flats.runr()

    calib_files.append(
        'processed_flat:{}'.format(reduce_flats.output_filenames[0])
    )

    # --- Science reduction, using processed bias + flat ---
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.runr()
Exemplo n.º 2
0
def test_reduce_image_GN_HAM_2x2_z(path_to_inputs):
    """Reduce a GMOS-N Hamamatsu 2x2 z-band imaging dataset end to end.

    Reduces bias, twilight flat, a fringe frame, and finally the science
    frames, chaining each product into the next step as a user
    calibration.  After each reduction it asserts that no NDAstroData
    objects are leaked (requires the optional ``objgraph`` package; the
    whole test is skipped if it is missing).
    """
    objgraph = pytest.importorskip("objgraph")

    logutils.config(file_name='gmos_test_reduce_image_GN_HAM_2x2_z.log')

    # Accumulated as 'caltype:filename' strings for normalize_ucals().
    calib_files = []

    raw_subdir = 'GMOS/GN-2017B-LP-15'
    all_files = sorted(glob.glob(
        os.path.join(path_to_inputs, raw_subdir, '*.fits')))
    assert len(all_files) > 1

    list_of_bias = dataselect.select_data(all_files, ['BIAS'], [])

    expr = dataselect.expr_parser('filter_name=="z"')
    list_of_z_flats = dataselect.select_data(all_files, ['TWILIGHT'], [], expr)

    expr = dataselect.expr_parser(
        'observation_class=="science" and filter_name=="z"'
    )
    list_of_science = dataselect.select_data(all_files, [], ['CAL'], expr)

    def reduce(filelist, saveto=None, label='', calib_files=None,
               recipename=None):
        """Run one Reduce() pass over *filelist*.

        saveto: calibration type under which to register the output
            (appended to *calib_files* as 'saveto:filename').
        label: human-readable tag for the step; kept for call-site
            readability, not used programmatically.
        calib_files: list of user calibrations to apply and, when
            *saveto* is given, to extend with the new product.
        recipename: optional alternative recipe to run.
        """
        red = Reduce()
        assert len(red.files) == 0
        red.files.extend(filelist)
        assert len(red.files) == len(filelist)
        if calib_files:
            red.ucals = normalize_ucals(red.files, calib_files)
        if recipename:
            red.recipename = recipename
        red.runr()
        # Guard against saveto being set with calib_files left as None,
        # which would otherwise raise AttributeError on None.append().
        if saveto and calib_files is not None:
            calib_files.append(f'{saveto}:{red.output_filenames[0]}')

        # check that we are not leaking objects
        assert len(objgraph.by_type('NDAstroData')) == 0

    reduce(list_of_bias, saveto='processed_bias', label='bias',
           calib_files=calib_files)
    reduce(list_of_z_flats, saveto='processed_flat', label='flat',
           calib_files=calib_files)

    # If makeFringe is included in the science recipe, this can be omitted:
    reduce(list_of_science, saveto='processed_fringe', label='fringe',
           calib_files=calib_files, recipename='makeProcessedFringe')

    reduce(list_of_science, label='science', calib_files=calib_files)
Exemplo n.º 3
0
def test_expr_parser_can_parse_for_exposure_time():
    """Check expr_parser output for 'exposure_time' expressions.

    Re-derives the expected result by hand: '==' on a float descriptor
    should be rewritten into an ``isclose()`` call; any other operator
    leaves the expression unchanged.  Only operators in '!=<>' are
    covered.
    """
    expression = ('exposure_time==60.', 'exposure_time==-45',
                  'exposure_time > 30.', 'exposure_time < 40.3',
                  'exposure_time! 60')

    for value in expression:
        # This test only covers 'exposure_time' expressions.
        assert "exposure_time" in value

        # Reset so values do not leak between iterations.
        descriptor_answer = descriptor = used_operator = None

        # Find the operator character used; the last match in "!=<>" wins
        # (for '==' expressions that is '=').
        for operator in "!=<>":
            if operator in value:
                descriptor = value.split(operator)[0]
                descriptor_answer = value.split(operator)[-1]
                used_operator = operator

        assert used_operator is not None

        # Compare with '!=' rather than 'is not': identity checks against
        # string literals rely on CPython interning and warn since 3.8.
        if used_operator != "=":
            # Non-equality expressions pass through unchanged.
            expected = descriptor + used_operator + descriptor_answer

        else:
            # Float equality becomes an isclose() call.
            expected = 'isclose(ad.' + descriptor + '(),' \
                       + descriptor_answer + ")"

        # Actual answer we're comparing to
        answer = dataselect.expr_parser(value)

        assert answer == expected
Exemplo n.º 4
0
def test_expr_parser_can_parse_for_exposure_time():
    """Check expr_parser output for 'exposure_time' expressions.

    Re-derives the expected result by hand: '==' on a float descriptor
    should be rewritten into an ``isclose()`` call; any other operator
    leaves the expression unchanged.  Only operators in '!=<>' are
    covered.
    """
    expression = ('exposure_time==60.', 'exposure_time==-45',
                  'exposure_time > 30.', 'exposure_time < 40.3',
                  'exposure_time! 60')

    for value in expression:
        # This test only covers 'exposure_time' expressions.
        assert "exposure_time" in value

        # Reset so values do not leak between iterations.
        descriptor_answer = descriptor = used_operator = None

        # Find the operator character used; the last match in "!=<>" wins
        # (for '==' expressions that is '=').
        for operator in "!=<>":
            if operator in value:
                descriptor = value.split(operator)[0]
                descriptor_answer = value.split(operator)[-1]
                used_operator = operator

        assert used_operator is not None

        # Compare with '!=' rather than 'is not': identity checks against
        # string literals rely on CPython interning and warn since 3.8.
        if used_operator != "=":
            # Non-equality expressions pass through unchanged.
            expected = descriptor + used_operator + descriptor_answer

        else:
            # Float equality becomes an isclose() call.
            expected = 'isclose(ad.' + descriptor + '(),' \
                       + descriptor_answer + ")"

        # Actual answer we're comparing to
        answer = dataselect.expr_parser(value)

        assert answer == expected
Exemplo n.º 5
0
def test_select_data(f2_data):
    """Selecting F2 Y-band flats returns exactly the two expected files."""
    expr = dataselect.expr_parser('filter_name=="Y"')
    selected = dataselect.select_data(f2_data, ["F2", "FLAT"], [], expr)

    # Compare basenames only so the fixture's directory layout is irrelevant.
    expected = {'S20131126S1111.fits', 'S20131126S1112.fits'}
    found = set()
    for path in selected:
        found.add(os.path.basename(path))
    assert found == expected
Exemplo n.º 6
0
def test_expr_parser_can_parse_for_ut_time_or_local_time():
    """Check expr_parser output for 'ut_time' / 'local_time' descriptors.

    Re-derives the expected result by hand: both descriptors should be
    compared against ``datetime.strptime(<value>, "%H:%M:%S").time()``.
    Only operators in '!=<>' are covered.
    """
    expression = ('ut_time==22-4-14', 'ut_time==31-14-54',
                  'ut_time>12-15-43', 'ut_time<07-41-41',
                  'ut_time!12-0-0',
                  'local_time=="Some_Filter"', 'local_time=="Other_Filter',
                  'local_time>"small_fliter"', 'local_time<"Big_Filter',
                  'local_time!"Bad_Filter'
                  )

    for value in expression:
        # This test only covers 'ut_time'/'local_time' expressions.
        assert ("ut_time" in value) or ("local_time" in value)

        # Reset so values do not leak between iterations.
        descriptor_answer = descriptor = used_operator = None

        # Find the operator character used; the last match in "!=<>" wins.
        for operator in "!=<>":
            if operator in value:

                descriptor = value.split(operator)[0]
                descriptor_answer = value.split(operator)[-1]
                used_operator = operator

                # Compare with '==' rather than 'is': identity checks
                # against string literals rely on CPython interning.
                if operator == "=":
                    # = -> == from testing to asserting in string
                    used_operator = "=="

        assert used_operator is not None

        expected = 'ad.' + descriptor + '()' + used_operator \
                   + 'datetime.strptime(' + descriptor_answer \
                   + ', "%H:%M:%S").time()'
        answer = dataselect.expr_parser(value)

        assert answer == expected
Exemplo n.º 7
0
def test_expr_parser_can_parse_for_ut_datetime():
    """Check expr_parser output for the 'ut_datetime' descriptor.

    Re-derives the expected result by hand: the descriptor should be
    compared against ``datetime.strptime(<value>, "%Y-%m-%d %H:%M:%S")``.
    Only operators in '!=<>' are covered.
    """
    expression = ('ut_datetime=="Some_time"',
                  'ut_datetime==1998-02-23-12-43-43-52',
                  'ut_datetime>2017-04-07-06-04-03',
                  'ut_datetime<2007-05-23-16-54-01',
                  'ut_datetime!2015-02-20-17-14-03')

    for value in expression:
        # This test only covers 'ut_datetime' expressions.
        assert "ut_datetime" in value

        # Reset so values do not leak between iterations.
        descriptor_answer = descriptor = used_operator = None

        # Find the operator character used; the last match in "!=<>" wins.
        for operator in "!=<>":
            if operator in value:

                descriptor = value.split(operator)[0]
                descriptor_answer = value.split(operator)[-1]
                used_operator = operator

                # Compare with '==' rather than 'is': identity checks
                # against string literals rely on CPython interning.
                if operator == "=":
                    # = -> == from testing to asserting in string
                    used_operator = "=="

        assert used_operator is not None

        expected = 'ad.' + descriptor + '()' + used_operator \
                   + 'datetime.strptime(' + descriptor_answer \
                   + ', "%Y-%m-%d %H:%M:%S")'
        answer = dataselect.expr_parser(value)

        assert answer == expected
Exemplo n.º 8
0
def test_expr_parser_can_parse_for_ut_time_or_local_time():
    """Check expr_parser output for 'ut_time' / 'local_time' descriptors.

    Re-derives the expected result by hand: both descriptors should be
    compared against ``datetime.strptime(<value>, "%H:%M:%S").time()``.
    Only operators in '!=<>' are covered.
    """
    expression = ('ut_time==22-4-14', 'ut_time==31-14-54', 'ut_time>12-15-43',
                  'ut_time<07-41-41', 'ut_time!12-0-0',
                  'local_time=="Some_Filter"', 'local_time=="Other_Filter',
                  'local_time>"small_fliter"', 'local_time<"Big_Filter',
                  'local_time!"Bad_Filter')

    for value in expression:
        # This test only covers 'ut_time'/'local_time' expressions.
        assert ("ut_time" in value) or ("local_time" in value)

        # Reset so values do not leak between iterations.
        descriptor_answer = descriptor = used_operator = None

        # Find the operator character used; the last match in "!=<>" wins.
        for operator in "!=<>":
            if operator in value:

                descriptor = value.split(operator)[0]
                descriptor_answer = value.split(operator)[-1]
                used_operator = operator

                # Compare with '==' rather than 'is': identity checks
                # against string literals rely on CPython interning.
                if operator == "=":
                    # = -> == from testing to asserting in string
                    used_operator = "=="

        assert used_operator is not None

        expected = 'ad.' + descriptor + '()' + used_operator \
                   + 'datetime.strptime(' + descriptor_answer \
                   + ', "%H:%M:%S").time()'
        answer = dataselect.expr_parser(value)

        assert answer == expected
Exemplo n.º 9
0
def test_expr_parser_can_parse_for_ut_date():
    """Check expr_parser output for the 'ut_date' descriptor.

    Re-derives the expected result by hand: the descriptor should be
    compared against ``datetime.strptime(<value>, "%Y-%m-%d").date()``.
    Only operators in '!=<>' are covered.
    """
    # NOTE(review): the stray trailing quote in the first expression
    # ('...12-13"') looks like a typo — confirm against expr_parser's
    # intended input format before changing it.
    expression = ('ut_date==1999-12-13"', 'ut_date==2011-03-14',
                  'ut_date>2018-12-31', 'ut_date<2001-01-01',
                  'ut_date!2018-08-31')

    for value in expression:
        # This test only covers 'ut_date' expressions.
        assert "ut_date" in value

        # Reset so values do not leak between iterations.
        descriptor_answer = descriptor = used_operator = None

        # Find the operator character used; the last match in "!=<>" wins.
        for operator in "!=<>":
            if operator in value:

                descriptor = value.split(operator)[0]
                descriptor_answer = value.split(operator)[-1]
                used_operator = operator

                # Compare with '==' rather than 'is': identity checks
                # against string literals rely on CPython interning.
                if operator == "=":
                    # = -> == from testing to asserting in string
                    used_operator = "=="

        assert used_operator is not None

        expected = 'ad.' + descriptor + '()' + used_operator \
                   + 'datetime.strptime(' + descriptor_answer \
                   + ', "%Y-%m-%d").date()'
        answer = dataselect.expr_parser(value)

        assert answer == expected
Exemplo n.º 10
0
def test_expr_parser_can_parse_for_filter_name():
    """Check expr_parser output for the 'filter_name' descriptor.

    Re-derives the expected result by hand: filter_name should be
    called with ``pretty=True`` and compared with the given operator.
    Only operators in '!=<>' are covered.
    """
    expression = ('filter_name=="Some_Filter"', 'filter_name=="Other_Filter',
                  'filter_name>"small_fliter"', 'filter_name<"Big_Filter',
                  'filter_name!"Bad_Filter')

    for value in expression:
        # This test only covers 'filter_name' expressions.
        assert "filter_name" in value

        # Reset so values do not leak between iterations.
        descriptor_answer = descriptor = used_operator = None

        # Find the operator character used; the last match in "!=<>" wins.
        for operator in "!=<>":
            if operator in value:

                descriptor = value.split(operator)[0]
                descriptor_answer = value.split(operator)[-1]
                used_operator = operator

                # Compare with '==' rather than 'is': identity checks
                # against string literals rely on CPython interning.
                if operator == "=":
                    # = -> == from testing to asserting in string
                    used_operator = "=="

        assert used_operator is not None

        expected = 'ad.' + descriptor + '(pretty=True)' \
                   + used_operator + descriptor_answer
        answer = dataselect.expr_parser(value)

        assert answer == expected
Exemplo n.º 11
0
def test_expr_parser_can_parse_for_filter_name():
    """Check expr_parser output for the 'filter_name' descriptor.

    Re-derives the expected result by hand: filter_name should be
    called with ``pretty=True`` and compared with the given operator.
    Only operators in '!=<>' are covered.
    """
    expression = ('filter_name=="Some_Filter"', 'filter_name=="Other_Filter',
                  'filter_name>"small_fliter"', 'filter_name<"Big_Filter',
                  'filter_name!"Bad_Filter')

    for value in expression:
        # This test only covers 'filter_name' expressions.
        assert "filter_name" in value

        # Reset so values do not leak between iterations.
        descriptor_answer = descriptor = used_operator = None

        # Find the operator character used; the last match in "!=<>" wins.
        for operator in "!=<>":
            if operator in value:

                descriptor = value.split(operator)[0]
                descriptor_answer = value.split(operator)[-1]
                used_operator = operator

                # Compare with '==' rather than 'is': identity checks
                # against string literals rely on CPython interning.
                if operator == "=":
                    # = -> == from testing to asserting in string
                    used_operator = "=="

        assert used_operator is not None

        expected = 'ad.' + descriptor + '(pretty=True)' \
                   + used_operator + descriptor_answer
        answer = dataselect.expr_parser(value)

        assert answer == expected
Exemplo n.º 12
0
def test_reduce_image_GS_HAM_2x2_i_std(path_to_inputs):
    """Reduce a GMOS-S Hamamatsu 2x2 i-band standard-star dataset.

    Builds a processed bias and a twilight flat from GS-2017B-Q-6, then
    reduces the partnerCal (standard) frames with both as user
    calibrations.

    NOTE(review): the log file name says '1x1' while the selections
    below are 2x2-binned — presumably a leftover from a copied test;
    confirm before relying on the log name.
    """
    logutils.config(file_name='gmos_test_reduce_image_GS_HAM_1x1_i.log')

    # Accumulated as 'caltype:filename' strings for normalize_ucals().
    calib_files = []

    raw_subdir = 'GMOS/GS-2017B-Q-6'

    all_files = sorted(glob.glob(
        os.path.join(path_to_inputs, raw_subdir, '*.fits')))
    assert len(all_files) > 1

    list_of_sci_bias = dataselect.select_data(
        all_files,
        ['BIAS'],
        [],
        dataselect.expr_parser('detector_x_bin==2 and detector_y_bin==2')
    )

    list_of_sci_flats = dataselect.select_data(
        all_files,
        ['TWILIGHT'],
        [],
        dataselect.expr_parser(
            'filter_name=="i" and detector_x_bin==2 and detector_y_bin==2'
        )
    )

    list_of_science_files = dataselect.select_data(
        all_files, [],
        [],
        dataselect.expr_parser(
            'observation_class=="partnerCal" and filter_name=="i"'
        )
    )

    # --- Bias reduction ---
    reduce_bias = Reduce()
    assert len(reduce_bias.files) == 0

    reduce_bias.files.extend(list_of_sci_bias)
    assert len(reduce_bias.files) == len(list_of_sci_bias)

    reduce_bias.runr()

    calib_files.append(
        'processed_bias:{}'.format(reduce_bias.output_filenames[0])
    )

    # --- Flat reduction, using the processed bias ---
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_sci_flats)
    # reduce_flats.uparms = [('addDQ:user_bpm', 'fixed_bpm_2x2_FullFrame.fits')]
    reduce_flats.ucals = normalize_ucals(reduce_flats.files, calib_files)
    reduce_flats.runr()

    calib_files.append(
        'processed_flat:{}'.format(reduce_flats.output_filenames[0])
    )

    # --- Standard-star reduction, using processed bias + flat ---
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.uparms = [
        ('stackFrames:memory', 1),
        # ('addDQ:user_bpm', 'fixed_bpm_2x2_FullFrame.fits'),
        ('resampleToCommonFrame:interpolator', 'spline3')
    ]
    reduce_target.runr()
Exemplo n.º 13
0
def test_reduce_image(change_working_dir):
    """Reduce an F2 imaging dataset end to end inside a temp directory.

    Downloads the raw frames, reduces darks at three exposure times,
    builds a bad-pixel mask (BPM) from Y-band flats plus the 3 s darks,
    reduces the flats with that BPM, and finally reduces the science
    frames with the BPM and the accumulated user calibrations.
    """
    with change_working_dir():
        # Accumulated as 'caltype:filename' strings for normalize_ucals().
        calib_files = []
        all_files = [download_from_archive(f) for f in datasets]
        all_files.sort()
        assert len(all_files) > 1

        darks_3s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==3'))
        darks_3s.sort()

        darks_20s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==20'))
        darks_20s.sort()

        darks_120s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==120'))
        darks_120s.sort()

        flats = dataselect.select_data(
            all_files, ['F2', 'FLAT', 'RAW'], [],
            dataselect.expr_parser('filter_name=="Y"'))
        flats.sort()

        science = dataselect.select_data(
            all_files, ['F2', 'RAW'], ['CAL'],
            dataselect.expr_parser('filter_name=="Y"'))
        science.sort()

        # --- Dark reduction, one pass per exposure time ---
        for darks in [darks_3s, darks_20s, darks_120s]:
            reduce_darks = Reduce()
            assert len(reduce_darks.files) == 0

            reduce_darks.files.extend(darks)
            assert len(reduce_darks.files) == len(darks)

            logutils.config(file_name='f2_test_reduce_darks.log', mode='quiet')
            reduce_darks.runr()

            calib_files.append('processed_dark:{}'.format(
                reduce_darks.output_filenames[0]))

        # --- BPM from flats + short darks ---
        logutils.config(file_name='f2_test_reduce_bpm.log', mode='quiet')
        reduce_bpm = Reduce()
        reduce_bpm.files.extend(flats)
        assert len(reduce_bpm.files) == len(flats)

        reduce_bpm.files.extend(darks_3s)
        assert len(reduce_bpm.files) == len(flats) + len(darks_3s)

        reduce_bpm.recipename = 'makeProcessedBPM'
        reduce_bpm.runr()

        bpm_filename = reduce_bpm.output_filenames[0]

        # --- Flat reduction, flagging bad pixels with the new BPM ---
        logutils.config(file_name='f2_test_reduce_flats.log', mode='quiet')
        reduce_flats = Reduce()
        reduce_flats.files.extend(flats)
        reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
        reduce_flats.runr()

        calib_files.append('processed_flat:{}'.format(
            reduce_flats.output_filenames[0]))

        # --- Science reduction with BPM + accumulated calibrations ---
        logutils.config(file_name='f2_test_reduce_science.log', mode='quiet')
        reduce_target = Reduce()
        reduce_target.files.extend(science)
        reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
        reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
        reduce_target.runr()
Exemplo n.º 14
0
def test_reduce_image(test_path, caldb):
    """Reduce a GSAOI imaging dataset end to end using the calibration DB.

    Reduces darks, builds a BPM from H-band flats plus the darks,
    reduces the Kshort flats with that BPM, and finally reduces the
    science frames.  Products are registered in *caldb* rather than
    passed as explicit user calibrations.
    """
    logutils.config(file_name='gsaoi_test_reduce_image.log')

    # Start from an empty calibration database.
    caldb.init(wipe=True)

    all_files = glob.glob(
        os.path.join(test_path, 'GSAOI/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    list_of_darks = dataselect.select_data(all_files, ['DARK'], [])

    list_of_kshort_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="Kshort"'))

    list_of_h_flats = dataselect.select_data(
        all_files, ['FLAT'], [], dataselect.expr_parser('filter_name=="H"'))

    # NOTE(review): the two standard-star selections below are never
    # used later in this test — presumably kept for a future assertion.
    list_of_std_LHS_2026 = dataselect.select_data(
        all_files, [], [], dataselect.expr_parser('object=="LHS 2026"'))

    list_of_std_cskd8 = dataselect.select_data(
        all_files, [], [], dataselect.expr_parser('object=="cskd-8"'))

    list_of_science_files = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser(
            'observation_class=="science" and exposure_time==60.'))

    # --- Dark reduction (single-element loop keeps the shape of the
    # multi-exposure F2 tests) ---
    for darks in [list_of_darks]:

        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()

        caldb.add_cal(reduce_darks.output_filenames[0])

    # --- BPM from H-band flats + darks ---
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(list_of_h_flats)
    reduce_bpm.files.extend(list_of_darks)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # --- Flat reduction with the new BPM ---
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_kshort_flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    caldb.add_cal(reduce_flats.output_filenames[0])

    # --- Science reduction; calibrations come from caldb ---
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    # Print the registered calibrations for debugging on failure.
    for f in caldb.list_files():
        print(f)
Exemplo n.º 15
0
def test_reduce_image(path_to_inputs):
    """Reduce an F2 imaging dataset end to end from local raw files.

    Reduces darks at three exposure times, builds a BPM from Y-band
    flats plus the 3 s darks, reduces the flats with that BPM, and
    finally reduces the science frames with the BPM and the accumulated
    user calibrations.
    """
    # Accumulated as 'caltype:filename' strings for normalize_ucals().
    calib_files = []

    all_files = glob.glob(
        os.path.join(path_to_inputs, 'F2/test_reduce/', '*.fits'))

    all_files.sort()

    assert len(all_files) > 1

    darks_3s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==3'))
    darks_3s.sort()

    darks_20s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==20'))
    darks_20s.sort()

    darks_120s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==120'))
    darks_120s.sort()

    flats = dataselect.select_data(all_files, ['F2', 'FLAT', 'RAW'], [],
                                   dataselect.expr_parser('filter_name=="Y"'))
    flats.sort()

    science = dataselect.select_data(
        all_files, ['F2', 'RAW'], ['CAL'],
        dataselect.expr_parser('filter_name=="Y"'))
    science.sort()

    # --- Dark reduction, one pass per exposure time ---
    for darks in [darks_3s, darks_20s, darks_120s]:
        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        logutils.config(file_name='f2_test_reduce_darks.log', mode='quiet')
        reduce_darks.runr()

        calib_files.append('processed_dark:{}'.format(
            reduce_darks.output_filenames[0]))

    # --- BPM from flats + short darks ---
    logutils.config(file_name='f2_test_reduce_bpm.log', mode='quiet')
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(flats)
    reduce_bpm.files.extend(darks_3s)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # --- Flat reduction with the new BPM ---
    logutils.config(file_name='f2_test_reduce_flats.log', mode='quiet')
    reduce_flats = Reduce()
    reduce_flats.files.extend(flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    calib_files.append('processed_flat:{}'.format(
        reduce_flats.output_filenames[0]))

    # --- Science reduction with BPM + accumulated calibrations ---
    logutils.config(file_name='f2_test_reduce_science.log', mode='quiet')
    reduce_target = Reduce()
    reduce_target.files.extend(science)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.runr()
Exemplo n.º 16
0
def test_reduce_image(test_path, caldb):
    """Reduce an F2 imaging dataset end to end using the calibration DB.

    Reduces darks at three exposure times, builds a BPM from Y-band
    flats plus the 3 s darks, reduces the flats with that BPM, and
    finally reduces the science frames.  Products are registered in
    *caldb* rather than passed as explicit user calibrations.
    """
    logutils.config(file_name='f2_test_reduce_image.log')

    # Start from an empty calibration database.
    caldb.init(wipe=True)

    all_files = glob.glob(
        os.path.join(test_path, 'F2/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    darks_3s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==3'))

    darks_20s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==20'))

    darks_120s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==120'))

    flats = dataselect.select_data(
        all_files, ['F2', 'FLAT', 'RAW'], [],
        dataselect.expr_parser('filter_name=="Y"'))

    science = dataselect.select_data(
        all_files, ['F2', 'RAW'], ['CAL'],
        dataselect.expr_parser('filter_name=="Y"'))

    # --- Dark reduction, one pass per exposure time ---
    for darks in [darks_3s, darks_20s, darks_120s]:

        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()

        caldb.add_cal(reduce_darks.output_filenames[0])

    # --- BPM from flats + short darks ---
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(flats)
    reduce_bpm.files.extend(darks_3s)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # --- Flat reduction with the new BPM ---
    reduce_flats = Reduce()
    reduce_flats.files.extend(flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    caldb.add_cal(reduce_flats.output_filenames[0])

    # --- Science reduction; calibrations come from caldb ---
    reduce_target = Reduce()
    reduce_target.files.extend(science)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    # Print the registered calibrations for debugging on failure.
    for f in caldb.list_files():
        print(f)
Exemplo n.º 17
0
def test_reduce_image(test_path, caldb):
    """Reduce a GSAOI imaging dataset end to end using the calibration DB.

    Reduces darks, builds a BPM from H-band flats plus the darks,
    reduces the Kshort flats with that BPM, and finally reduces the
    science frames.  Products are registered in *caldb* rather than
    passed as explicit user calibrations.
    """
    logutils.config(file_name='gsaoi_test_reduce_image.log')

    # Start from an empty calibration database.
    caldb.init(wipe=True)

    all_files = glob.glob(
        os.path.join(test_path, 'GSAOI/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    list_of_darks = dataselect.select_data(
        all_files, ['DARK'], [])

    list_of_kshort_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="Kshort"'))

    list_of_h_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="H"'))

    # NOTE(review): the two standard-star selections below are never
    # used later in this test — presumably kept for a future assertion.
    list_of_std_LHS_2026 = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser('object=="LHS 2026"'))

    list_of_std_cskd8 = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser('object=="cskd-8"'))

    list_of_science_files = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser('observation_class=="science" and exposure_time==60.'))

    # --- Dark reduction (single-element loop keeps the shape of the
    # multi-exposure F2 tests) ---
    for darks in [list_of_darks]:

        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()

        caldb.add_cal(reduce_darks.output_filenames[0])

    # --- BPM from H-band flats + darks ---
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(list_of_h_flats)
    reduce_bpm.files.extend(list_of_darks)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # --- Flat reduction with the new BPM ---
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_kshort_flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    caldb.add_cal(reduce_flats.output_filenames[0])

    # --- Science reduction; calibrations come from caldb ---
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    # Print the registered calibrations for debugging on failure.
    for f in caldb.list_files():
        print(f)
Exemplo n.º 18
0
def test_reduce_image(path_to_inputs):
    """Reduce a GSAOI imaging dataset end to end from local raw files.

    Reduces darks, builds a BPM from H-band flats plus the darks,
    reduces the Kshort flats with that BPM, and finally reduces the
    science frames with the BPM and accumulated user calibrations.
    Intermediate Reduce objects are deleted as soon as they are done,
    apparently to release memory between steps.
    """
    # Accumulated as 'caltype:filename' strings for normalize_ucals().
    calib_files = []

    all_files = glob.glob(
        os.path.join(path_to_inputs, 'GSAOI/test_reduce/', '*.fits'))

    all_files.sort()

    assert len(all_files) > 1

    list_of_darks = dataselect.select_data(all_files, ['DARK'], [])
    list_of_darks.sort()

    list_of_kshort_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="Kshort"'))
    list_of_kshort_flats.sort()

    list_of_h_flats = dataselect.select_data(
        all_files, ['FLAT'], [], dataselect.expr_parser('filter_name=="H"'))
    list_of_h_flats.sort()

    list_of_science_files = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser(
            'observation_class=="science" and exposure_time==60.'))
    list_of_science_files.sort()

    # --- Dark reduction (single-element loop keeps the shape of the
    # multi-exposure F2 tests); note the dark product is NOT registered.
    for darks in [list_of_darks]:
        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        logutils.config(file_name='gsaoi_test_reduce_dark.log', mode='quiet')
        reduce_darks.runr()

        del reduce_darks

    # --- BPM from H-band flats + darks ---
    logutils.config(file_name='gsaoi_test_reduce_bpm.log', mode='quiet')
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(list_of_h_flats)
    reduce_bpm.files.extend(list_of_darks)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    del reduce_bpm

    # --- Flat reduction with the new BPM ---
    logutils.config(file_name='gsaoi_test_reduce_flats.log', mode='quiet')
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_kshort_flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    calib_files.append('processed_flat:{}'.format(
        reduce_flats.output_filenames[0]))

    del reduce_flats

    # --- Science reduction with BPM + accumulated calibrations ---
    logutils.config(file_name='gsaoi_test_reduce_science.log', mode='quiet')
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.runr()

    del reduce_target
Exemplo n.º 19
0
def test_reduce_image(test_path, caldb):
    """Reduce an F2 imaging dataset end to end using the calibration DB.

    Reduces darks at three exposure times, builds a BPM from Y-band
    flats plus the 3 s darks, reduces the flats with that BPM, and
    finally reduces the science frames.  Products are registered in
    *caldb* rather than passed as explicit user calibrations.
    """
    logutils.config(file_name='f2_test_reduce_image.log')

    # Start from an empty calibration database.
    caldb.init(wipe=True)

    all_files = glob.glob(os.path.join(test_path, 'F2/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    darks_3s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==3'))

    darks_20s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==20'))

    darks_120s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==120'))

    flats = dataselect.select_data(all_files, ['F2', 'FLAT', 'RAW'], [],
                                   dataselect.expr_parser('filter_name=="Y"'))

    science = dataselect.select_data(
        all_files, ['F2', 'RAW'], ['CAL'],
        dataselect.expr_parser('filter_name=="Y"'))

    # --- Dark reduction, one pass per exposure time ---
    for darks in [darks_3s, darks_20s, darks_120s]:

        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()

        caldb.add_cal(reduce_darks.output_filenames[0])

    # --- BPM from flats + short darks ---
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(flats)
    reduce_bpm.files.extend(darks_3s)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # --- Flat reduction with the new BPM ---
    reduce_flats = Reduce()
    reduce_flats.files.extend(flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    caldb.add_cal(reduce_flats.output_filenames[0])

    # --- Science reduction; calibrations come from caldb ---
    reduce_target = Reduce()
    reduce_target.files.extend(science)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    # Print the registered calibrations for debugging on failure.
    for f in caldb.list_files():
        print(f)