Example #1
def test_frame_avg(make_rampmodel, make_darkmodel):
    '''Check that if NFRAMES>1 or GROUPGAP>0, the frame-averaged dark data are
    subtracted group-by-group from science data groups and the ERR arrays are not modified'''

    # size of integration
    nints = 1
    ngroups = 5
    xsize = 1032
    ysize = 1024

    # create raw input data for step
    dm_ramp = make_rampmodel(nints, ngroups, ysize, xsize)
    dm_ramp.meta.exposure.nframes = 4
    dm_ramp.meta.exposure.groupgap = 0

    # populate data array of science cube
    for i in range(0, ngroups-1):
        dm_ramp.data[:, i] = i + 1

    # create dark reference file model

    refgroups = 20  # This needs to be 20 groups for the calculations to work
    dark = make_darkmodel(refgroups, ysize, xsize)

    # populate data array of reference file
    for i in range(0, refgroups - 1):
        dark.data[0, i] = i * 0.1

    # apply correction
    outfile = darkcorr(dm_ramp, dark)

    # dark frames should be averaged in groups of 4 frames

    # this will result in average values of 0.15, 0.55, 0.95, and 1.35
    # these values are then subtracted from frame values of 1, 2, 3 and 4

    assert outfile.data[0, 0, 500, 500] == pytest.approx(0.85)
    assert outfile.data[0, 1, 500, 500] == pytest.approx(1.45)
    assert outfile.data[0, 2, 500, 500] == pytest.approx(2.05)
    assert outfile.data[0, 3, 500, 500] == pytest.approx(2.65)

    # check that the error array is not modified.
    np.testing.assert_array_equal(outfile.err[:, :], 0)
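The expected values in the asserts above follow from averaging the dark ramp (0.0, 0.1, 0.2, ...) in blocks of nframes = 4 and subtracting the averages from the science group values 1-4. A minimal standalone sketch of that arithmetic with plain NumPy (values taken from the loops above; this is not pipeline code):

import numpy as np

nframes = 4
dark_ramp = np.arange(20) * 0.1             # dark group values 0.0, 0.1, ..., 1.9
sci_vals = np.array([1.0, 2.0, 3.0, 4.0])   # science group values set in the loop

# average the dark in blocks of nframes: (0.0 + 0.1 + 0.2 + 0.3) / 4 = 0.15, etc.
avg_dark = dark_ramp[:4 * nframes].reshape(4, nframes).mean(axis=1)
print(avg_dark)             # [0.15 0.55 0.95 1.35]
print(sci_vals - avg_dark)  # [0.85 1.45 2.05 2.65] -> the values asserted above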
Example #2
def test_more_sci_frames(make_rampmodel, make_darkmodel):
    '''Check that the data are unchanged if there are more frames in the science
    data than in the dark reference file, and verify that when the dark is not applied,
    the data are correctly flagged as such'''

    # size of integration
    nints = 1
    ngroups = 7
    xsize = 200
    ysize = 200

    # create raw input data for step
    dm_ramp = make_rampmodel(nints, ngroups, ysize, xsize)
    dm_ramp.meta.exposure.nframes = 1
    dm_ramp.meta.exposure.groupgap = 0

    # populate data array of science cube
    for i in range(0, ngroups-1):
        dm_ramp.data[0, i] = i

    refgroups = 5
    # create dark reference file model with fewer frames than science data
    dark = make_darkmodel(refgroups, ysize, xsize)

    # populate data array of reference file
    for i in range(0, refgroups - 1):
        dark.data[0, i] = i * 0.1

    # apply correction
    outfile = darkcorr(dm_ramp, dark)

    # test that the science data are not changed; input file = output file
    np.testing.assert_array_equal(dm_ramp.data, outfile.data)

    # get dark correction status from header
    darkstatus = outfile.meta.cal_step.dark_sub
    print('Dark status', darkstatus)

    assert darkstatus == 'SKIPPED'
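The SKIPPED status asserted here reflects the step declining to run when the science exposure needs more dark groups than the reference file provides. A hypothetical illustration of that kind of check (not the pipeline's actual implementation; the condition is an assumption):

# science needs ngroups * (nframes + groupgap) raw dark frames to build every group
sci_ngroups, nframes, groupgap = 7, 1, 0
dark_ngroups = 5
if sci_ngroups * (nframes + groupgap) > dark_ngroups:
    status = 'SKIPPED'    # data returned unchanged, cal_step.dark_sub flagged
else:
    status = 'COMPLETE'
print(status)             # SKIPPED for the sizes used in this test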
Example #3
def test_sub_by_frame(make_rampmodel, make_darkmodel):
    '''Check that if NFRAMES=1 and GROUPGAP=0 for the science data, the dark reference data are
    directly subtracted frame by frame'''

    # size of integration
    nints = 1
    ngroups = 10
    xsize = 200
    ysize = 200

    # create raw input data for step
    dm_ramp = make_rampmodel(nints, ngroups, ysize, xsize)
    dm_ramp.meta.exposure.nframes = 1
    dm_ramp.meta.exposure.groupgap = 0

    # populate data array of science cube
    for i in range(0, ngroups - 1):
        dm_ramp.data[0, i] = i

    # create dark reference file model with more frames than science data
    refgroups = 15
    dark = make_darkmodel(refgroups, ysize, xsize)

    # populate data array of reference file
    for i in range(0, refgroups - 1):
        dark.data[0, i] = i * 0.1

    # apply correction
    outfile = darkcorr(dm_ramp, dark)

    # remove the leading single-integration axis (shape (1, ngroups, ysize, xsize)) so the comparison in the assert works
    outdata = np.squeeze(outfile.data)

    # check that the dark file is subtracted frame by frame from the science data
    diff = dm_ramp.data[0] - dark.data[0, :ngroups]

    # test that the output data file is equal to the difference found when subtracting ref file from sci file

    np.testing.assert_array_equal(
        outdata, diff, err_msg='dark file should be subtracted from sci file ')
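With NFRAMES=1 and GROUPGAP=0 no averaging happens, so the expected output is a plain element-wise subtraction of the first ngroups dark groups, which is exactly what `diff` computes. A standalone check of the per-pixel ramp, assuming the fixture zero-fills the science array (the last group, untouched by the loop, stays 0):

import numpy as np

ngroups = 10
sci = np.zeros(ngroups)
sci[:ngroups - 1] = np.arange(ngroups - 1)   # group values 0..8 from the loop above
dark = np.arange(ngroups) * 0.1              # first 10 dark group values 0.0..0.9
print(sci - dark)                            # expected output ramp at any (y, x)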
Example #4
def test_dq_combine(make_rampmodel, make_darkmodel):
    '''Verify that the DQ array of the dark is correctly combined with the PIXELDQ array of the science data.'''

    # size of integration
    nints = 1
    ngroups = 5
    xsize = 200
    ysize = 200

    # create raw input data for step
    dm_ramp = make_rampmodel(nints, ngroups, ysize, xsize)
    dm_ramp.meta.exposure.nframes = 1
    dm_ramp.meta.exposure.groupgap = 0

    # populate data array of science cube
    for i in range(1, ngroups-1):
        dm_ramp.data[0, i, :, :] = i

    # create dark reference file model with more frames than science data
    refgroups = 7
    dark = make_darkmodel(refgroups, ysize, xsize)

    jump_det = dqflags.pixel['JUMP_DET']
    saturated = dqflags.pixel['SATURATED']
    do_not_use = dqflags.pixel['DO_NOT_USE']

    # populate dq flags of sci pixeldq and reference dq
    dm_ramp.pixeldq[50, 50] = jump_det
    dm_ramp.pixeldq[50, 51] = saturated

    dark.dq[0, 0, 50, 50] = do_not_use
    dark.dq[0, 0, 50, 51] = do_not_use

    # run correction step
    outfile = darkcorr(dm_ramp, dark)

    # check that dq flags were correctly added
    assert outfile.pixeldq[50, 50] == np.bitwise_or(jump_det, do_not_use)
    assert outfile.pixeldq[50, 51] == np.bitwise_or(saturated, do_not_use)
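The combination being checked is a per-pixel bitwise OR of the science PIXELDQ with the dark DQ. As a quick sanity check of the expected flag arithmetic, the numeric bit values below follow the standard JWST pixel flag definitions (DO_NOT_USE=1, SATURATED=2, JUMP_DET=4), but treat them as assumptions here:

import numpy as np

do_not_use, saturated, jump_det = 1, 2, 4       # assumed bit values
print(np.bitwise_or(jump_det, do_not_use))      # 5 -> expected pixeldq[50, 50]
print(np.bitwise_or(saturated, do_not_use))     # 3 -> expected pixeldq[50, 51]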
Example #5
def test_2_int(make_rampmodel, make_darkmodel):
    '''Verify the dark correction is done by integration for MIRI observations'''

    # size of integration
    nints = 2
    ngroups = 10
    xsize = 200
    ysize = 200

    # create raw input data for step
    dm_ramp = make_rampmodel(nints, ngroups, ysize, xsize)
    dm_ramp.meta.exposure.nframes = 1
    dm_ramp.meta.exposure.groupgap = 0

    # populate data array of science cube
    for i in range(0, ngroups-1):
        dm_ramp.data[:, i] = i

    # create dark reference file model with more frames than science data
    refgroups = 15
    dark = make_darkmodel(refgroups, ysize, xsize)

    # populate data array of reference file
    for i in range(0, refgroups - 1):
        dark.data[0, i] = i * 0.1
        dark.data[1, i] = i * 0.2

    # run correction
    outfile = darkcorr(dm_ramp, dark)

    # check that the dark file is subtracted frame by frame from the science data
    diff = dm_ramp.data[0] - dark.data[0, :ngroups]
    diff_int2 = dm_ramp.data[1] - dark.data[1, :ngroups]

    # test that the output data file is equal to the difference found when subtracting ref file from sci file
    np.testing.assert_array_equal(outfile.data[0], diff)
    np.testing.assert_array_equal(outfile.data[1], diff_int2)
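For MIRI the dark reference data are 4-D, with one ramp per integration, so the correction pairs science integration i with dark integration i. A toy NumPy illustration of that indexing, using the same fill values as the loops above (shapes reduced to one pixel per group; the fixture is assumed to zero-fill):

import numpy as np

nints, ngroups, refgroups = 2, 10, 15
sci = np.zeros((nints, ngroups))
sci[:, :ngroups - 1] = np.arange(ngroups - 1)            # both integrations: 0..8
dark = np.zeros((nints, refgroups))
dark[0, :refgroups - 1] = np.arange(refgroups - 1) * 0.1
dark[1, :refgroups - 1] = np.arange(refgroups - 1) * 0.2
out = sci - dark[:, :ngroups]                            # integration i minus dark integration i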
Example #6
def test_nan(make_rampmodel, make_darkmodel):
    '''Verify that when a dark has NaNs, these are correctly assumed as zero and the PIXELDQ is set properly'''

    # size of integration
    nints = 1
    ngroups = 10
    xsize = 200
    ysize = 200

    # create raw input data for step
    dm_ramp = make_rampmodel(nints, ngroups, ysize, xsize)
    dm_ramp.meta.exposure.nframes = 1
    dm_ramp.meta.exposure.groupgap = 0

    # populate data array of science cube
    for i in range(0, ngroups-1):
        dm_ramp.data[0, i, :, :] = i

    # create dark reference file model with more frames than science data
    refgroups = 15
    dark = make_darkmodel(refgroups, ysize, xsize)

    # populate data array of reference file
    for i in range(0, refgroups - 1):
        dark.data[0, i] = i * 0.1

    # set NaN in dark file

    dark.data[0, 5, 100, 100] = np.nan

    # apply correction
    outfile = darkcorr(dm_ramp, dark)

    # test that the NaN dark reference pixel was set to 0 (nothing subtracted)

    assert outfile.data[0, 5, 100, 100] == 5.0
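The behaviour under test is that a NaN in the dark contributes nothing to the subtraction, i.e. it is effectively replaced by zero, so the science value (5.0 here) passes through unchanged. A one-line sketch of that idea (an assumption about how the step treats NaNs, not its actual code):

import numpy as np

dark_slice = np.array([0.4, np.nan, 0.6])
cleaned = np.where(np.isnan(dark_slice), 0.0, dark_slice)   # NaN -> 0: nothing subtracted there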
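All of the examples above rely on the same imports and on two pytest fixtures, make_rampmodel and make_darkmodel, defined elsewhere in the test module. They appear to be the JWST calibration pipeline's dark-subtraction tests, so a rough, hypothetical reconstruction of that shared scaffolding is sketched below; the model classes, constructor forms, and metadata fields are assumptions and may differ between jwst versions:

import numpy as np
import pytest

# Assumed import aliases: the tests call darkcorr() and use dqflags.pixel.
from jwst.dark_current.dark_sub import do_correction as darkcorr
from jwst.datamodels import RampModel, DarkMIRIModel, dqflags


@pytest.fixture
def make_rampmodel():
    '''Hypothetical fixture: zero-filled MIRI science ramp of the requested shape.'''
    def _make(nints, ngroups, ysize, xsize):
        ramp = RampModel(np.zeros((nints, ngroups, ysize, xsize), dtype=np.float32))
        ramp.meta.instrument.name = 'MIRI'
        return ramp
    return _make


@pytest.fixture
def make_darkmodel():
    '''Hypothetical fixture: zero-filled 4-D MIRI dark reference model.'''
    def _make(ngroups, ysize, xsize):
        dark = DarkMIRIModel(np.zeros((2, ngroups, ysize, xsize), dtype=np.float32))
        dark.meta.exposure.nframes = 1
        dark.meta.exposure.groupgap = 0
        return dark
    return _make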