Example #1
0
    def test_slitdark_in_calservice(self, get_or_create_tmpdir, do_slit_dark):
        """
        Check that:

        - A processed slit dark exists in the local calibrations dir;
        - It can be retrieved by the calibration system when reducing a
          suitable frame with the 'recipeRetrieveSlitDarkTest' recipe.
        """

        # Ensure the slit dark reduction has been done
        _, _, _ = do_slit_dark
        _, cal_service = get_or_create_tmpdir

        # Exactly one processed slit dark must have been stored.
        assert len(glob.glob(os.path.join(
            os.getcwd(), 'calibrations', 'processed_dark', '*dark*slit*.fits'
        ))) == 1, "Couldn't find the stored slit bias in the calibrations " \
                  "system OR found multiples\n " \
                  "(calibration ls: {})\n" \
                  "(caldb contents: {})".format(
            glob.glob(os.path.join(os.getcwd(), 'calibrations',
                                   'processed_dark', '*')),
            [_ for _ in cal_service.list_files()],
        )

        # Attempt a reduction that needs the stored slit dark.
        reduce = Reduce()
        reduce.drpkg = 'ghostdr'
        # Use one of the 'dark slit' files to try and retrieve the slit bias
        reduce.files = glob.glob(
            os.path.join(os.getcwd(), 'flat95*MEF_2x2_slit.fits'))
        reduce.mode = [
            'test',
        ]
        reduce.recipename = 'recipeRetrieveSlitDarkTest'
        reduce.logfile = os.path.join(os.getcwd(),
                                      'reduce_slitdark_retrieve.log')
        reduce.logmode = 'quiet'
        reduce.suffix = '_testSlitDarkRetrieve'
        # FIXME cal_service will hopefully find the calibration itself later
        logutils.config(file_name=reduce.logfile, mode=reduce.logmode)

        try:
            reduce.runr()
        except IOError as e:
            # BUG FIX: Python 3 exceptions have no ``.message`` attribute;
            # formatting the exception object itself gives the same text
            # without raising AttributeError inside the failure path.
            assert 0, 'Calibration system could not locate the slit bias ' \
                      'frame ({})'.format(e)
        finally:
            # Teardown: remove any reduced outputs left in the working dir.
            for _ in glob.glob(
                    os.path.join(os.getcwd(),
                                 '*{}.fits'.format(reduce.suffix)), ):
                os.remove(_)
Example #2
0
def test_primitive_not_found():
    """Requesting a nonexistent recipe/primitive must raise RecipeNotFound
    with a message naming the missing primitive."""
    input_path = download_from_archive("N20160524S0119.fits")

    reducer = Reduce()
    reducer.files.append(input_path)
    reducer.recipename = 'foobar'
    with pytest.raises(RecipeNotFound, match='No primitive named foobar'):
        reducer.runr()
Example #3
0
def reduce(file_list,
           label,
           calib_files,
           recipe_name=None,
           save_to=None,
           user_pars=None):
    """
    Helper function used to prevent replication of code.

    Parameters
    ----------
    file_list : list
        List of files that will be reduced.
    label : str
        Label used on log file names.
    calib_files : list
        List of calibration files properly formatted for DRAGONS Reduce().
    recipe_name : str, optional
        Name of the recipe used to reduce the data.
    save_to : str, optional
        Stores the calibration files locally in a list.
    user_pars : list, optional
        List of user parameters.

    Returns
    -------
    str : Output reduced file.
    list : An updated list of calibration files.
    """
    objgraph = pytest.importorskip("objgraph")

    logutils.get_logger().info("\n\n\n")
    logutils.config(file_name=f"test_image_{label}.log")
    r = Reduce()
    r.files = file_list
    r.ucals = normalize_ucals(r.files, calib_files)
    r.uparms = user_pars

    if recipe_name:
        r.recipename = recipe_name

    r.runr()
    output_file = r.output_filenames[0]

    if save_to:
        # Register the new calibration (path points inside 'calibrations/')
        # and delete the working copies from the current directory.
        calib_files.append("{}:{}".format(
            save_to,
            os.path.join("calibrations", save_to, r.output_filenames[0])))
        # FIX: plain loop instead of a list comprehension built solely for
        # its side effects.
        for fname in r.output_filenames:
            os.remove(fname)

    # check that we are not leaking objects
    assert len(objgraph.by_type('NDAstroData')) == 0

    return output_file, calib_files
    def do_overscan_subtract(self, get_or_create_tmpdir, request):
        """
        Run overscan correction on the main data.

        Globs the parametrized raw bias frame out of the shared temp dir,
        runs the 'recipeBiasRemoveOverscan' recipe from the 'ghostdr'
        package over it, and yields the raw and corrected filenames to the
        test.  Teardown removes the reduced output from the working dir.

        .. note::
            Fixture.
        """
        # Copy the raw data file into here
        # request.param selects which raw bias frame variant to reduce.
        rawfilename = 'bias*{}*.fits'.format(request.param)
        tmpsubdir, cal_service = get_or_create_tmpdir
        # Make sure we're working inside the temp dir
        # rawfiles = glob.glob(os.path.join(
        #     os.path.dirname(os.path.abspath(__file__)),
        #     'testdata',
        #     rawfilename))
        # shutil.copy(
        #     rawfiles[0],
        #     os.path.join(tmpsubdir.dirname, tmpsubdir.basename))
        # Take the first matching raw file from the temp dir.
        rawfile = glob.glob(
            os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                         rawfilename))[0]

        # Do the overscan subtraction
        reduce = Reduce()
        reduce.drpkg = 'ghostdr'
        reduce.files = [
            rawfile,
        ]
        reduce.mode = [
            'test',
        ]
        reduce.recipename = 'recipeBiasRemoveOverscan'
        reduce.logfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                      'reduce_overscancorrect.log')
        reduce.logmode = 'quiet'
        reduce.suffix = '_testOverscanCorrect'
        logutils.config(file_name=reduce.logfile, mode=reduce.logmode)
        reduce.runr()

        # The glob result is already an absolute path, so the second join
        # below is a no-op (os.path.join keeps an absolute second argument).
        corrfilename = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                    '*' + reduce.suffix + '.fits')
        corrfilename = glob.glob(corrfilename)[0]
        corrfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                corrfilename)

        # Return filenames of raw, subtracted files
        yield rawfile, corrfile

        # Execute teardown code
        for _ in glob.glob(
                os.path.join(os.getcwd(), '*{}.fits'.format(reduce.suffix))):
            os.remove(_)
Example #5
0
    def test_slitbias_in_calservice(self, get_or_create_tmpdir):
        """
        Check that:

        - A bias slit calibrator exists in the local calibrations dir;
        - It can be retrieved using a getProcessedSlitBias call.
        """
        # Exactly one processed slit bias must have been stored.
        assert len(glob.glob(os.path.join(
            os.getcwd(), 'calibrations', 'processed_bias', '*bias*slit*.fits'
        ))) == 1, "Couldn't find the stored slit bias in the calibrations " \
                  "system OR found multiples"

        # Attempt a reduction that needs the stored slit bias.
        reduce = Reduce()
        reduce.drpkg = 'ghostdr'
        # Use one of the 'dark slit' files to try and retrieve the slit bias
        reduce.files = [
            os.path.join(os.getcwd(), 'dark95_1_MEF_2x2_slit.fits'),
        ]
        reduce.mode = [
            'test',
        ]
        reduce.recipename = 'recipeRetrieveSlitBiasTest'
        reduce.logfile = os.path.join(os.getcwd(),
                                      'reduce_slitbias_retrieve.log')
        # FIXME cal_service will hopefully find the calibration itself later
        reduce.ucals = normalize_ucals(reduce.files, [
            'processed_bias:{}'.format(
                glob.glob(
                    os.path.join('calibrations', 'processed_bias',
                                 '*slit*bias*.fits'))[0]),
        ])
        reduce.logmode = 'quiet'
        reduce.suffix = '_testSlitBiasRetrieve'
        logutils.config(file_name=reduce.logfile, mode=reduce.logmode)

        try:
            reduce.runr()
        except IOError as e:
            # BUG FIX: Python 3 exceptions have no ``.message`` attribute;
            # format the exception object itself instead.
            assert 0, 'Calibration system could not locate the slit bias ' \
                      'frame ({})'.format(e)
        finally:
            # Teardown: remove any reduced outputs left in the working dir.
            for _ in glob.glob(
                    os.path.join(os.getcwd(),
                                 '*{}.fits'.format(reduce.suffix)), ):
                os.remove(_)
Example #6
0
def processed_slit_illum(change_working_dir, path_to_inputs, request):
    """
    Returns the processed slit illumination function that will be analysed.

    Downloads the parametrized twilight frame, pairs it with its associated
    master bias from the inputs directory, and runs the
    'makeProcessedSlitIllum' recipe inside a scratch working directory.

    Parameters
    ----------
    change_working_dir : pytest.fixture
        Fixture that changes the working directory (see :mod:`astrodata.testing`).
    path_to_inputs : pytest.fixture
        Fixture defined in :mod:`astrodata.testing` with the path to the
        pre-processed input file.
    request : pytest.fixture
        PyTest built-in fixture containing information about parent test.

    Returns
    -------
    AstroData
        Input spectrum processed up to right before the `applyQECorrection`.
    """
    twi_filename = request.param
    twi_path = download_from_archive(twi_filename)
    twi_ad = astrodata.open(twi_path)

    print(twi_ad.tags)

    bias_path = os.path.join(path_to_inputs,
                             associated_calibrations[twi_filename])
    assert os.path.exists(bias_path)

    ucal_list = ['processed_bias:{}'.format(bias_path)]

    with change_working_dir():
        print("Reducing SLITILLUM in folder:\n  {}".format(os.getcwd()))
        log_name = 'log_flat_{}.txt'.format(twi_ad.data_label())
        logutils.config(file_name=log_name)

        job = Reduce()
        job.files.append(twi_path)
        job.mode = 'sq'
        job.recipename = 'makeProcessedSlitIllum'
        job.ucals = normalize_ucals(job.files, ucal_list)
        job.runr()

        result_path = job.output_filenames.pop()
        result_ad = astrodata.open(result_path)

    return result_ad
Example #7
0
    def reduce(filelist, saveto=None, label='', calib_files=None,
               recipename=None):
        """Run one Reduce pass over *filelist*, optionally with user
        calibrations and a named recipe.

        When *saveto* is given, the first output filename is appended to
        *calib_files* as a ``saveto:path`` entry (assumes *calib_files*
        is a list in that case).  Also asserts no NDAstroData objects
        leak after the run.
        """
        job = Reduce()
        assert not job.files
        job.files.extend(filelist)
        assert len(job.files) == len(filelist)
        if calib_files:
            job.ucals = normalize_ucals(job.files, calib_files)
        if recipename:
            job.recipename = recipename
        job.runr()
        if saveto:
            calib_files.append(f'{saveto}:{job.output_filenames[0]}')

        # check that we are not leaking objects
        assert len(objgraph.by_type('NDAstroData')) == 0
Example #8
0
    def do_slit_bias(self, get_or_create_tmpdir):
        """
        Reduce the bias slit test data.

        Runs the ghostdr 'recipeSlitBiasTest' recipe over every raw slit
        bias frame found in the shared temp dir, yields the raw and
        reduced filenames, then removes the reduced output on teardown.

        .. note::
            Fixture.
        """
        tmpsubdir, cal_service = get_or_create_tmpdir
        # All raw slit-bias frames sitting in the temp working dir.
        rawfiles = glob.glob(
            os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                         'bias*slit*.fits'))

        # Configure and run the slit-bias reduction.
        red = Reduce()
        red.drpkg = 'ghostdr'
        red.files = rawfiles
        red.mode = [
            'test',
        ]
        red.recipename = 'recipeSlitBiasTest'
        red.logfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                   'reduce_slitbias.log')
        red.logmode = 'quiet'
        red.suffix = '_testSlitBias'
        logutils.config(file_name=red.logfile, mode=red.logmode)
        red.runr()

        # Resolve the reduced output (glob pattern is relative to cwd).
        pattern = '*' + red.suffix + '.fits'
        corrfilename = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                    glob.glob(pattern)[0])
        corrfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                corrfilename)

        # Hand the raw and reduced filenames to the test.
        yield rawfiles, corrfile

        # Teardown: remove the reduced output.
        for leftover in glob.glob(os.path.join(os.getcwd(), corrfilename)):
            os.remove(leftover)
Example #9
0
    def do_slit_dark(self, get_or_create_tmpdir):
        """
        Reduce the test slit dark data.

        Runs the ghostdr 'recipeSlitDarkTest' recipe over all raw slit
        dark frames in the shared temp dir, using the previously stored
        processed slit bias as a user calibration.  Yields the raw files,
        the reduced filename, and the calibration mapping; teardown
        removes the reduced output.

        .. note::
            Fixture.
        """
        rawfilename = 'dark*slit*.fits'
        # Copy the raw data file into here
        tmpsubdir, cal_service = get_or_create_tmpdir
        # Find all the relevant files
        rawfiles = glob.glob(
            os.path.join(tmpsubdir.dirname, tmpsubdir.basename, rawfilename))

        # Do the master bias generation
        reduce = Reduce()
        reduce.drpkg = 'ghostdr'
        reduce.files = rawfiles
        reduce.mode = [
            'test',
        ]
        reduce.recipename = 'recipeSlitDarkTest'
        # Make sure refresh is used for all primitives
        # NOTE(review): other tests in this file set ``reduce.uparms``;
        # ``upars`` here may be a silently-ignored attribute -- confirm
        # which name ghostdr's Reduce actually reads.
        reduce.upars = [
            'refresh=True',
        ]
        # FIXME cal_service will hopefully find the calibration itself later
        calibs = {
            'processed_bias':
            glob.glob(
                os.path.join('calibrations', 'processed_bias',
                             '*slit*bias*.fits'))[0],
        }
        reduce.ucals = normalize_ucals(
            reduce.files, ['{}:{}'.format(k, v) for k, v in calibs.items()])
        reduce.logfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                      'reduce_slitdark.log')
        reduce.logmode = 'standard'
        reduce.suffix = '_testSlitDark'
        logutils.config(file_name=reduce.logfile, mode=reduce.logmode)
        reduce.runr()

        # The reduced output is globbed relative to cwd, then made absolute.
        corrfilename = '*' + reduce.suffix + '.fits'
        corrfilename = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                    glob.glob(corrfilename)[0])
        corrfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                corrfilename)

        # Return filenames of raw, subtracted files
        yield rawfiles, corrfile, calibs

        # import pdb; pdb.set_trace()

        # Execute teardown code
        for _ in glob.glob(
                os.path.join(
                    os.getcwd(),
                    # rawfilename,
                    corrfilename,
                )):
            os.remove(_)
Example #10
0
def test_reduce_image(change_working_dir):
    """
    End-to-end F2 imaging reduction: build processed darks (3s/20s/120s),
    a BPM from flats plus 3s darks, a processed flat, and finally reduce
    the science frames using the accumulated calibrations.
    """
    with change_working_dir():
        calib_files = []
        all_files = [download_from_archive(f) for f in datasets]
        all_files.sort()
        assert len(all_files) > 1

        # Split the downloaded files by exposure time / type.
        darks_3s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==3'))
        darks_3s.sort()

        darks_20s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==20'))
        darks_20s.sort()

        darks_120s = dataselect.select_data(
            all_files, ['F2', 'DARK', 'RAW'], [],
            dataselect.expr_parser('exposure_time==120'))
        darks_120s.sort()

        flats = dataselect.select_data(
            all_files, ['F2', 'FLAT', 'RAW'], [],
            dataselect.expr_parser('filter_name=="Y"'))
        flats.sort()

        science = dataselect.select_data(
            all_files, ['F2', 'RAW'], ['CAL'],
            dataselect.expr_parser('filter_name=="Y"'))
        science.sort()

        # One processed dark per exposure time.
        for darks in [darks_3s, darks_20s, darks_120s]:
            reduce_darks = Reduce()
            assert len(reduce_darks.files) == 0

            reduce_darks.files.extend(darks)
            assert len(reduce_darks.files) == len(darks)

            logutils.config(file_name='f2_test_reduce_darks.log', mode='quiet')
            reduce_darks.runr()

            calib_files.append('processed_dark:{}'.format(
                reduce_darks.output_filenames[0]))

        # Bad-pixel mask from flats + short darks.
        logutils.config(file_name='f2_test_reduce_bpm.log', mode='quiet')
        reduce_bpm = Reduce()
        reduce_bpm.files.extend(flats)
        assert len(reduce_bpm.files) == len(flats)

        reduce_bpm.files.extend(darks_3s)
        assert len(reduce_bpm.files) == len(flats) + len(darks_3s)

        reduce_bpm.recipename = 'makeProcessedBPM'
        reduce_bpm.runr()

        bpm_filename = reduce_bpm.output_filenames[0]

        # Processed flat, applying the BPM.
        logutils.config(file_name='f2_test_reduce_flats.log', mode='quiet')
        reduce_flats = Reduce()
        reduce_flats.files.extend(flats)
        reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
        reduce_flats.runr()

        calib_files.append('processed_flat:{}'.format(
            reduce_flats.output_filenames[0]))

        # Science reduction using all accumulated calibrations.
        logutils.config(file_name='f2_test_reduce_science.log', mode='quiet')
        reduce_target = Reduce()
        reduce_target.files.extend(science)
        reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
        reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
        reduce_target.runr()
Example #11
0
def test_reduce_image(test_path, caldb):
    """
    End-to-end GSAOI imaging reduction against a local caldb: processed
    darks, a BPM from H flats + darks, a Kshort processed flat, then the
    science frames.  Calibrations are registered in *caldb* as produced.
    """
    logutils.config(file_name='gsaoi_test_reduce_image.log')

    # Start from an empty calibration database.
    caldb.init(wipe=True)

    all_files = glob.glob(
        os.path.join(test_path, 'GSAOI/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    # Partition the inputs by type/target.
    list_of_darks = dataselect.select_data(all_files, ['DARK'], [])

    list_of_kshort_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="Kshort"'))

    list_of_h_flats = dataselect.select_data(
        all_files, ['FLAT'], [], dataselect.expr_parser('filter_name=="H"'))

    # NOTE: the two standard-star selections below are currently unused.
    list_of_std_LHS_2026 = dataselect.select_data(
        all_files, [], [], dataselect.expr_parser('object=="LHS 2026"'))

    list_of_std_cskd8 = dataselect.select_data(
        all_files, [], [], dataselect.expr_parser('object=="cskd-8"'))

    list_of_science_files = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser(
            'observation_class=="science" and exposure_time==60.'))

    # Processed dark(s), registered with the caldb.
    for darks in [list_of_darks]:

        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()

        caldb.add_cal(reduce_darks.output_filenames[0])

    # Bad-pixel mask from H flats + darks.
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(list_of_h_flats)
    reduce_bpm.files.extend(list_of_darks)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # Processed flat, applying the BPM.
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_kshort_flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    caldb.add_cal(reduce_flats.output_filenames[0])

    # Science reduction; calibrations come from the caldb.
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    for f in caldb.list_files():
        print(f)
Example #12
0
def test_reduce_image(test_path, caldb):
    """
    End-to-end GSAOI imaging reduction against a local caldb: processed
    darks, a BPM from H flats + darks, a Kshort processed flat, then the
    science frames.  Calibrations are registered in *caldb* as produced.
    """
    logutils.config(file_name='gsaoi_test_reduce_image.log')

    # Start from an empty calibration database.
    caldb.init(wipe=True)

    all_files = glob.glob(
        os.path.join(test_path, 'GSAOI/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    # Partition the inputs by type/target.
    list_of_darks = dataselect.select_data(
        all_files, ['DARK'], [])

    list_of_kshort_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="Kshort"'))

    list_of_h_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="H"'))

    # NOTE: the two standard-star selections below are currently unused.
    list_of_std_LHS_2026 = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser('object=="LHS 2026"'))

    list_of_std_cskd8 = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser('object=="cskd-8"'))

    list_of_science_files = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser('observation_class=="science" and exposure_time==60.'))

    # Processed dark(s), registered with the caldb.
    for darks in [list_of_darks]:

        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()

        caldb.add_cal(reduce_darks.output_filenames[0])

    # Bad-pixel mask from H flats + darks.
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(list_of_h_flats)
    reduce_bpm.files.extend(list_of_darks)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # Processed flat, applying the BPM.
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_kshort_flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    caldb.add_cal(reduce_flats.output_filenames[0])

    # Science reduction; calibrations come from the caldb.
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    for f in caldb.list_files():
        print(f)
Example #13
0
def test_reduce_image(test_path, caldb):
    """
    End-to-end F2 imaging reduction against a local caldb: processed darks
    per exposure time, a BPM from flats + 3s darks, a processed flat, then
    the science frames.  Calibrations are registered in *caldb*.
    """
    logutils.config(file_name='f2_test_reduce_image.log')

    # Start from an empty calibration database.
    caldb.init(wipe=True)

    all_files = glob.glob(
        os.path.join(test_path, 'F2/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    # Split the inputs by exposure time / type.
    darks_3s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==3'))

    darks_20s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==20'))

    darks_120s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==120'))

    flats = dataselect.select_data(
        all_files, ['F2', 'FLAT', 'RAW'], [],
        dataselect.expr_parser('filter_name=="Y"'))

    science = dataselect.select_data(
        all_files, ['F2', 'RAW'], ['CAL'],
        dataselect.expr_parser('filter_name=="Y"'))

    # One processed dark per exposure time, registered with the caldb.
    for darks in [darks_3s, darks_20s, darks_120s]:

        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        reduce_darks.runr()

        caldb.add_cal(reduce_darks.output_filenames[0])

    # Bad-pixel mask from flats + short darks.
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(flats)
    reduce_bpm.files.extend(darks_3s)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # Processed flat, applying the BPM.
    reduce_flats = Reduce()
    reduce_flats.files.extend(flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    caldb.add_cal(reduce_flats.output_filenames[0])

    # Science reduction; calibrations come from the caldb.
    reduce_target = Reduce()
    reduce_target.files.extend(science)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.runr()

    for f in caldb.list_files():
        print(f)
Example #14
0
def test_reduce_image(path_to_inputs):
    """
    End-to-end F2 imaging reduction from local inputs: processed darks per
    exposure time, a BPM from flats + 3s darks, a processed flat, then the
    science frames using user calibrations collected in *calib_files*.
    """
    calib_files = []

    all_files = glob.glob(
        os.path.join(path_to_inputs, 'F2/test_reduce/', '*.fits'))

    all_files.sort()

    assert len(all_files) > 1

    # Split the inputs by exposure time / type.
    darks_3s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==3'))
    darks_3s.sort()

    darks_20s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==20'))
    darks_20s.sort()

    darks_120s = dataselect.select_data(
        all_files, ['F2', 'DARK', 'RAW'], [],
        dataselect.expr_parser('exposure_time==120'))
    darks_120s.sort()

    flats = dataselect.select_data(all_files, ['F2', 'FLAT', 'RAW'], [],
                                   dataselect.expr_parser('filter_name=="Y"'))
    flats.sort()

    science = dataselect.select_data(
        all_files, ['F2', 'RAW'], ['CAL'],
        dataselect.expr_parser('filter_name=="Y"'))
    science.sort()

    # One processed dark per exposure time.
    for darks in [darks_3s, darks_20s, darks_120s]:
        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        logutils.config(file_name='f2_test_reduce_darks.log', mode='quiet')
        reduce_darks.runr()

        calib_files.append('processed_dark:{}'.format(
            reduce_darks.output_filenames[0]))

    # Bad-pixel mask from flats + short darks.
    logutils.config(file_name='f2_test_reduce_bpm.log', mode='quiet')
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(flats)
    reduce_bpm.files.extend(darks_3s)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    # Processed flat, applying the BPM.
    logutils.config(file_name='f2_test_reduce_flats.log', mode='quiet')
    reduce_flats = Reduce()
    reduce_flats.files.extend(flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    calib_files.append('processed_flat:{}'.format(
        reduce_flats.output_filenames[0]))

    # Science reduction using all accumulated calibrations.
    logutils.config(file_name='f2_test_reduce_science.log', mode='quiet')
    reduce_target = Reduce()
    reduce_target.files.extend(science)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.runr()
Example #15
0
def test_reduce_image(path_to_inputs):
    """
    End-to-end GSAOI imaging reduction from local inputs: processed darks,
    a BPM from H flats + darks, a Kshort processed flat, then the science
    frames.  Reduce objects are explicitly deleted between stages.
    """
    calib_files = []

    all_files = glob.glob(
        os.path.join(path_to_inputs, 'GSAOI/test_reduce/', '*.fits'))

    all_files.sort()

    assert len(all_files) > 1

    # Partition the inputs by type.
    list_of_darks = dataselect.select_data(all_files, ['DARK'], [])
    list_of_darks.sort()

    list_of_kshort_flats = dataselect.select_data(
        all_files, ['FLAT'], [],
        dataselect.expr_parser('filter_name=="Kshort"'))
    list_of_kshort_flats.sort()

    list_of_h_flats = dataselect.select_data(
        all_files, ['FLAT'], [], dataselect.expr_parser('filter_name=="H"'))
    list_of_h_flats.sort()

    list_of_science_files = dataselect.select_data(
        all_files, [], [],
        dataselect.expr_parser(
            'observation_class=="science" and exposure_time==60.'))
    list_of_science_files.sort()

    # Processed dark(s).
    for darks in [list_of_darks]:
        reduce_darks = Reduce()
        assert len(reduce_darks.files) == 0

        reduce_darks.files.extend(darks)
        assert len(reduce_darks.files) == len(darks)

        logutils.config(file_name='gsaoi_test_reduce_dark.log', mode='quiet')
        reduce_darks.runr()

        # Free the Reduce object (and its data) before the next stage.
        del reduce_darks

    # Bad-pixel mask from H flats + darks.
    logutils.config(file_name='gsaoi_test_reduce_bpm.log', mode='quiet')
    reduce_bpm = Reduce()
    reduce_bpm.files.extend(list_of_h_flats)
    reduce_bpm.files.extend(list_of_darks)
    reduce_bpm.recipename = 'makeProcessedBPM'
    reduce_bpm.runr()

    bpm_filename = reduce_bpm.output_filenames[0]

    del reduce_bpm

    # Processed flat, applying the BPM.
    logutils.config(file_name='gsaoi_test_reduce_flats.log', mode='quiet')
    reduce_flats = Reduce()
    reduce_flats.files.extend(list_of_kshort_flats)
    reduce_flats.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_flats.runr()

    calib_files.append('processed_flat:{}'.format(
        reduce_flats.output_filenames[0]))

    del reduce_flats

    # Science reduction using the accumulated calibrations.
    logutils.config(file_name='gsaoi_test_reduce_science.log', mode='quiet')
    reduce_target = Reduce()
    reduce_target.files.extend(list_of_science_files)
    reduce_target.uparms = [('addDQ:user_bpm', bpm_filename)]
    reduce_target.ucals = normalize_ucals(reduce_target.files, calib_files)
    reduce_target.runr()

    del reduce_target
Example #16
0
    def do_bias_subtract(self, get_or_create_tmpdir, request):
        """
        Perform basic bias subtraction on the dark frame.

        Globs the parametrized raw dark frames from the shared temp dir,
        bias-corrects the first one with the ghostdr
        'recipeDarkBiasCorrect' recipe using the stored processed bias,
        and yields the raw files, corrected filename, and calibration
        mapping.  Teardown removes the reduced output.

        .. note::
            Fixture.
        """
        rawfilename = 'dark*{}*.fits'.format(request.param)
        # Copy the raw data file into here
        tmpsubdir, cal_service = get_or_create_tmpdir
        # Find all the relevant files
        rawfiles = glob.glob(
            os.path.join(tmpsubdir.dirname, tmpsubdir.basename, rawfilename))

        # Do the bias correction
        reduce = Reduce()
        reduce.drpkg = 'ghostdr'
        # BUG FIX: Reduce.files takes a list of filenames; the original
        # assigned the bare string rawfiles[0].  Every sibling fixture in
        # this file assigns a list, and normalize_ucals below iterates
        # reduce.files.
        reduce.files = rawfiles[:1]
        reduce.mode = [
            'test',
        ]
        reduce.recipename = 'recipeDarkBiasCorrect'
        reduce.logfile = os.path.join(
            tmpsubdir.dirname, tmpsubdir.basename,
            'reduce_biascorrect_{}.log'.format(request.param))
        reduce.logmode = 'quiet'
        reduce.suffix = '_{}_testBiasCorrect'.format(request.param)
        logutils.config(file_name=reduce.logfile, mode=reduce.logmode)
        # FIXME cal_service will hopefully find the calibration itself later
        calibs = {
            'processed_bias':
            glob.glob(
                os.path.join('calibrations', 'processed_bias',
                             'bias*{}*.fits'.format(request.param)))[0],
        }
        reduce.ucals = normalize_ucals(
            reduce.files, ['{}:{}'.format(k, v) for k, v in calibs.items()])

        reduce.runr()

        # Resolve the reduced output (glob pattern is relative to cwd).
        corrfilename = '*' + reduce.suffix + '.fits'
        corrfilename = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                    glob.glob(corrfilename)[0])
        corrfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                corrfilename)

        # Return filenames of raw, subtracted files
        yield rawfiles, corrfile, calibs

        # Execute teardown code
        for _ in glob.glob(
                os.path.join(os.getcwd(), '*{}.fits'.format(reduce.suffix))):
            os.remove(_)
Example #17
0
    def do_master_flat(self, get_or_create_tmpdir, request):
        """
        Run the recipeFlatCreateMaster recipe.

        Globs the raw flats matching the parametrized (arm, resolution)
        pair out of the shared temp dir, reduces them with the ghostdr
        'recipeFlatCreateMaster' recipe using the stored processed bias,
        dark, and slitflat, and yields the raw files, reduced filename,
        and calibration mapping.  No teardown is performed.

        .. note::
            Fixture.
        """
        # request.param carries (arm, resolution), e.g. ('red', 'std').
        arm, res = request.param
        rawfilename = 'flat*{}*{}*.fits'.format(res, arm)
        # Copy the raw data file into here
        tmpsubdir, cal_service = get_or_create_tmpdir
        # Find all the relevant files
        # rawfiles = glob.glob(os.path.join(os.path.dirname(
        #     os.path.abspath(__file__)),
        #     'testdata',
        #     rawfilename))
        # for f in rawfiles:
        #     shutil.copy(f, os.path.join(tmpsubdir.dirname, tmpsubdir.basename))
        rawfiles = glob.glob(
            os.path.join(tmpsubdir.dirname, tmpsubdir.basename, rawfilename))

        # Do the master flat generation
        reduce = Reduce()
        reduce.drpkg = 'ghostdr'
        reduce.files = rawfiles
        reduce.mode = [
            'test',
        ]
        reduce.recipename = 'recipeFlatCreateMaster'
        # reduce.mode = ['sq', ]
        # reduce.recipename = 'makeProcessedBias'
        reduce.logfile = os.path.join(
            tmpsubdir.dirname, tmpsubdir.basename,
            'reduce_masterflat_{}_{}.log'.format(res, arm))
        reduce.logmode = 'quiet'
        reduce.suffix = '_{}_{}_testMasterFlat'.format(res, arm)
        logutils.config(file_name=reduce.logfile, mode=reduce.logmode)
        # import pdb; pdb.set_trace()
        # User calibrations: bias/dark matched by arm, slitflat by resolution.
        calibs = {
            'processed_bias':
            glob.glob(
                os.path.join('calibrations', 'processed_bias',
                             'bias*{}*.fits'.format(arm)))[0],
            'processed_dark':
            glob.glob(
                os.path.join('calibrations', 'processed_dark',
                             'dark*{}*.fits'.format(arm)))[0],
            'processed_slitflat':
            glob.glob(
                os.path.join('calibrations', 'processed_slitflat',
                             'flat*{}*slitflat*.fits'.format(res)))[0],
        }
        reduce.ucals = normalize_ucals(
            reduce.files, ['{}:{}'.format(k, v) for k, v in calibs.items()])

        reduce.runr()
        # Leftover debugging hook for the std/red combination; does nothing.
        if res == 'std' and arm == 'red':
            pass
            # import pdb;
            # pdb.set_trace()

        # The reduced output is globbed relative to cwd, then made absolute.
        corrfilename = '*' + reduce.suffix + '.fits'
        corrfilename = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                    glob.glob(corrfilename)[0])
        corrfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                corrfilename)

        # Return filenames of raw, subtracted files
        yield rawfiles, corrfile, calibs

        # Execute teardown code
        pass
Example #18
0
    def do_master_bias(self, get_or_create_tmpdir, request):
        """
        Reduce the test bias frames into a master bias.

        .. note::
            Fixture.

        Yields
        ------
        rawfiles : list of str
            The overscan-corrected bias frames found after reduction.
        corrfile : str
            Absolute path of the generated master bias file.
        """
        rawfilename = 'bias*{}*.fits'.format(request.param)
        # The raw data files have already been placed in the tmpdir by the
        # get_or_create_tmpdir fixture; locate them there.
        tmpsubdir, cal_service = get_or_create_tmpdir
        rawfiles = glob.glob(
            os.path.join(tmpsubdir.dirname, tmpsubdir.basename, rawfilename))

        # Configure and run the master bias generation recipe
        reduce = Reduce()
        reduce.drpkg = 'ghostdr'
        reduce.files = rawfiles
        reduce.mode = [
            'test',
        ]
        reduce.recipename = 'recipeBiasCreateMaster'
        # Make sure refresh is used for all primitives
        reduce.upars = [
            'refresh=True',
        ]
        reduce.logfile = os.path.join(
            tmpsubdir.dirname, tmpsubdir.basename,
            'reduce_masterbias_{}.log'.format(request.param))
        reduce.logmode = 'quiet'
        reduce.suffix = '_{}_testMasterBias'.format(request.param)
        logutils.config(file_name=reduce.logfile, mode=reduce.logmode)

        reduce.runr()

        # BUGFIX: the original joined the tmpdir path onto an already-absolute
        # path -- a no-op, since os.path.join discards everything before an
        # absolute component.  Build the absolute output path exactly once.
        corrfile = os.path.join(
            tmpsubdir.dirname, tmpsubdir.basename,
            glob.glob('*' + reduce.suffix + '.fits')[0])

        # Find the overscan-corrected bias files
        rawfiles = glob.glob(
            os.path.join(
                tmpsubdir.dirname,
                tmpsubdir.basename,
                rawfilename.split('.')[0] + '*_overscanCorrect*.fits',
            ))

        # Return filenames of raw, subtracted files
        yield rawfiles, corrfile

        # Execute teardown code (nothing to clean up here)
        pass
Example #19
0
    def do_slit_arc(self, request, get_or_create_tmpdir):
        """
        Reduce the test slit arc data.

        .. note::
            Fixture.

        Yields
        ------
        filenames : list of str
            The raw slit files that were reduced.
        corrfiles : list of str
            Absolute path of the reduced output for each raw file.
        calibs : dict
            Mapping of calibration type to the calibration file used.
        """
        # The raw data files have already been placed in the tmpdir by the
        # get_or_create_tmpdir fixture; locate them there.
        tmpsubdir, cal_service = get_or_create_tmpdir
        slit_type, res = request.param
        filenames = glob.glob('{}*{}*slit.fits'.format(slit_type, res))

        # Configure the reduction
        reduce = Reduce()
        reduce.drpkg = 'ghostdr'
        reduce.mode = ['test', ]
        reduce.recipename = 'recipeSlitArcTest' if slit_type == 'arc' \
            else 'recipeSlitTest'
        # Make sure refresh is used for all primitives
        reduce.upars = ['refresh=True', ]
        # FIXME cal_service will hopefully find the calibration itself later
        calibs = {
            'processed_bias': glob.glob(os.path.join(
                'calibrations',
                'processed_bias',
                '*slit*bias*.fits'))[0],
            'processed_dark': glob.glob(os.path.join(
                'calibrations',
                'processed_dark',
                '*slit*dark*.fits'))[0],
            'processed_slitflat': glob.glob(os.path.join(
                'calibrations',
                'processed_slitflat',
                '*{}*slit*slitflat*.fits'.format(res)))[0],
        }
        reduce.logfile = os.path.join(tmpsubdir.dirname, tmpsubdir.basename,
                                      'reduce_slit{}.log'.format(slit_type))
        reduce.logmode = 'standard'
        reduce.suffix = '_{}_testSlit'.format(slit_type)
        logutils.config(file_name=reduce.logfile, mode=reduce.logmode)

        corrfiles = []
        for filename in filenames:
            reduce.files = [filename, ]
            reduce.ucals = normalize_ucals(reduce.files, [
                '{}:{}'.format(k, v) for k, v in calibs.items()
            ])
            reduce.runr()

            # BUGFIX: the original re-joined the tmpdir path onto an
            # already-absolute path (a no-op for os.path.join); build the
            # absolute output path exactly once.
            corrfile = os.path.join(
                tmpsubdir.dirname, tmpsubdir.basename,
                glob.glob('*' + reduce.suffix + '.fits')[0])
            corrfiles.append(corrfile)

        # Return filenames of raw, subtracted files
        yield filenames, corrfiles, calibs

        # Execute teardown code
        # BUGFIX: the original only removed the file produced by the final
        # loop iteration; delete every reduced output we created.
        for corrfile in corrfiles:
            if os.path.exists(corrfile):
                os.remove(corrfile)
Example #20
0
def test_reduce_image(test_path, caldb):
    """
    End-to-end F2 imaging reduction: darks, BPM, flats, then science frames.

    Each processed dark and the processed flat are registered with the
    calibration database so later steps can retrieve them.
    """
    logutils.config(file_name='f2_test_reduce_image.log')

    caldb.init(wipe=True)

    all_files = glob.glob(os.path.join(test_path, 'F2/test_reduce/', '*.fits'))
    assert len(all_files) > 1

    def select(tags, xtags, expression):
        # Thin wrapper to keep the repeated dataselect plumbing in one place.
        return dataselect.select_data(
            all_files, tags, xtags, dataselect.expr_parser(expression))

    darks_3s = select(['F2', 'DARK', 'RAW'], [], 'exposure_time==3')
    darks_20s = select(['F2', 'DARK', 'RAW'], [], 'exposure_time==20')
    darks_120s = select(['F2', 'DARK', 'RAW'], [], 'exposure_time==120')
    flats = select(['F2', 'FLAT', 'RAW'], [], 'filter_name=="Y"')
    science = select(['F2', 'RAW'], ['CAL'], 'filter_name=="Y"')

    # Build a processed dark for each exposure time and register it.
    for dark_group in [darks_3s, darks_20s, darks_120s]:
        dark_reduction = Reduce()
        assert len(dark_reduction.files) == 0

        dark_reduction.files.extend(dark_group)
        assert len(dark_reduction.files) == len(dark_group)

        dark_reduction.runr()

        caldb.add_cal(dark_reduction.output_filenames[0])

    # Derive a bad-pixel mask from the flats plus the short darks.
    bpm_reduction = Reduce()
    bpm_reduction.files.extend(flats)
    bpm_reduction.files.extend(darks_3s)
    bpm_reduction.recipename = 'makeProcessedBPM'
    bpm_reduction.runr()

    bpm_filename = bpm_reduction.output_filenames[0]

    # Processed flat, flagging bad pixels via the freshly made BPM.
    flat_reduction = Reduce()
    flat_reduction.files.extend(flats)
    flat_reduction.uparms = [('addDQ:user_bpm', bpm_filename)]
    flat_reduction.runr()

    caldb.add_cal(flat_reduction.output_filenames[0])

    # Finally reduce the science frames with the same BPM.
    target_reduction = Reduce()
    target_reduction.files.extend(science)
    target_reduction.uparms = [('addDQ:user_bpm', bpm_filename)]
    target_reduction.runr()

    for f in caldb.list_files():
        print(f)