예제 #1
0
def test_side_averaging():
    '''For MIRI data, check that the mean value in the reference pixels is calculated for each amplifier
    using the average of the left and right side reference pixels.'''
    # The left reference columns hold 1.0 and the right ones hold 2.0, so
    # the value subtracted from the science pixels should be their mean, 1.5.

    # dimensions of a full MIRI frame (1024 rows, 1032 columns incl. refpix)
    ngroups = 10
    ysize = 1024
    xsize = 1032

    # build a zero-valued ramp model and scale the science array
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # fill the left/right reference columns for every group after the first
    im.data[:, 1:, :, :4] = 1.0
    im.data[:, 1:, :, 1028:] = 2.0

    # flag the outer four columns on each side as reference pixels
    for edge in (slice(None, 4), slice(1028, None)):
        im.pixeldq[:, edge] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the reference pixel correction
    out = RefPixStep.call(im)

    # 50 - 1.5 = 48.5 for a science pixel in group 5
    assert out.data[0, 5, 100, 50] == 48.5
예제 #2
0
def test_refpix_subarray():
    '''Check that the correction is skipped for MIR subarray data '''

    # MIRI subarray exposures receive no reference pixel correction, so
    # the science array must come out of the step untouched.

    # subarray dimensions
    ngroups = 10
    ysize = 224
    xsize = 288

    # build the ramp and mark it as a subarray observation
    im = make_rampmodel(ngroups, ysize, xsize)
    im.meta.subarray.name = 'MASK1550'
    im.meta.subarray.ystart = 467

    # give the four leftmost columns distinct values (groups after the first)
    for col in range(4):
        im.data[:, 1:, :, col] = float(col + 1)

    outim = RefPixStep.call(im)

    # input minus output should be identically zero everywhere
    diff = im.data - outim.data
    np.testing.assert_array_equal(np.full((1, ngroups, ysize, xsize), 0.0, dtype=float),
                                  diff, err_msg='no changes should be seen in array ')
예제 #3
0
def test_each_amp():
    '''Test that each amp is calculated separately using the average of left
     and right pixels'''

    # full-frame MIRI dimensions
    ngroups = 10
    ysize = 1024
    xsize = 1032

    # ramp model with zero-valued data
    im = make_rampmodel(ngroups, ysize, xsize)

    # mirror the same values into the left (0-3) and right (1028-1031)
    # reference columns for every group after the first
    for offset, value in enumerate((1.0, 2.0, 3.0, 4.0)):
        im.data[:, 1:, :, offset] = value
        im.data[:, 1:, :, 1028 + offset] = value

    # flag both edges as reference pixels
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction
    out = RefPixStep.call(im)

    # amp k should have the reference value k+1 subtracted from it
    assert out.data[0, 5, 100, 4] == 4.0  # pick a random pixel in the 4th column
    assert out.data[0, 5, 100, 5] == 3.0
    assert out.data[0, 5, 100, 6] == 2.0
    assert out.data[0, 5, 100, 7] == 1.0
예제 #4
0
def test_nan_refpix():
    '''Verify that the reference pixels flagged DO_NOT_USE are not used in the calculation

    A reference pixel set to NaN would poison the average if it were
    included; flagging it DO_NOT_USE must leave the subtracted average
    unchanged at 1.5, so the output pixel value is unaffected.'''

    # full-frame MIRI dimensions
    ngroups = 5
    ysize = 1024
    xsize = 1032

    # ramp model with the science array scaled up
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # left side 1.0, right side 2.0 (groups after the first), with one
    # left-side reference pixel poisoned with NaN
    im.data[:, 1:, :, :4] = 1.0
    im.data[:, 1:, :, 1028:] = 2.0
    im.data[0, 3, 50, 3] = np.nan

    # mark the reference columns; the NaN pixel is flagged DO_NOT_USE
    # NOTE(review): the last assignment overwrites the REFERENCE_PIXEL flag
    # at [50, 3] instead of OR-ing it in -- confirm that is intended
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[50, 3] = dqflags.pixel['DO_NOT_USE']

    # apply the correction
    out = RefPixStep.call(im)

    # 30 - 1.5 = 28.5: the NaN was rejected from the average
    assert out.data[0, 3, 50, 7] == 28.5
예제 #5
0
def test_above_sigma():
    '''Test that a value greater than 3 sigma above mean of reference pixels is rejected
       in the averaging of the reference pixels to be subtracted.'''

    # full-frame MIRI dimensions
    ngroups = 5
    ysize = 1024
    xsize = 1032

    # scaled ramp model
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # nominal reference values plus a single large outlier on the left edge
    left = slice(None, 4)
    right = slice(1028, None)
    im.data[:, 1:, :, left] = 1.0
    im.data[:, 1:, :, right] = 2.0
    im.data[0, 3, 50, 3] = 35.0

    # flag the reference columns on both edges
    for edge in (left, right):
        im.pixeldq[:, edge] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction
    out = RefPixStep.call(im)

    # the outlier is sigma-clipped, so the subtracted value stays 1.5
    # and 30 - 1.5 = 28.5
    assert out.data[0, 3, 50, 7] == 28.5
예제 #6
0
def test_refpix_subarray():
    '''Check that the correction is skipped for MIR subarray data '''

    # a MIRI subarray exposure must pass through the step unchanged

    # small subarray dimensions
    ngroups = 3
    ysize = 22
    xsize = 28

    # ramp model flagged as subarray data
    im = make_rampmodel(ngroups, ysize, xsize)
    im.meta.subarray.name = 'MASK1550'
    im.meta.subarray.ystart = 467

    # fill the left-edge columns with distinct values
    for col, value in enumerate((1.0, 2.0, 3.0, 4.0)):
        im.data[:, 1:, :, col] = value

    outim = RefPixStep.call(im)

    # science array must be untouched
    np.testing.assert_array_equal(im.data, outim.data)
예제 #7
0
def test_nan_refpix():
    '''Verify that the reference pixels flagged DO_NOT_USE are not used in the calculation

    One reference pixel is set to NaN and flagged DO_NOT_USE; if the step
    correctly rejects it, the subtracted average stays finite (1.5) and
    the output pixel value is unchanged by the NaN.'''

    # full-frame MIRI dimensions
    ngroups = 5
    ysize = 1024
    xsize = 1032

    # ramp model with the science array scaled up
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # left columns 1.0, right columns 2.0 (groups after the first)
    for cols, value in ((slice(None, 4), 1.0), (slice(1028, None), 2.0)):
        im.data[:, 1:, :, cols] = value

    # poison one left-edge reference pixel
    im.data[0, 3, 50, 3] = np.nan

    # flag the reference columns, then flag the poisoned pixel DO_NOT_USE
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[50, 3] = dqflags.pixel['DO_NOT_USE']

    # apply the correction
    out = RefPixStep.call(im)

    # 30 - 1.5 = 28.5, so the NaN was excluded from the average
    assert out.data[0, 3, 50, 7] == 28.5
예제 #8
0
    def run_jump_step(self, infile, threshold, run_steps):
        '''Run the jump detection step.

        Parameters
        ----------
        infile : str
            Input FITS file name (assumed to end in ".fits").
        threshold : int or float or None
            CR rejection threshold passed to the jump step; the step's
            own default is used when None.
        run_steps : bool
            If True, run all calibration steps prior to jump (dq_init,
            saturation, superbias, refpix, linearity, dark current) and
            feed the result to the jump step; otherwise run only jump on
            the input file.

        Returns
        -------
        m : JWST data model
            Result of the jump step.
        '''

        # output file name
        out = infile[:-5] + "_jump_CRthresh" + str(threshold) + ".fits"

        # if run_steps, run all steps prior to jump
        if run_steps:

            m = DQInitStep.call(infile)
            m = SaturationStep.call(m)
            m = SuperBiasStep.call(m)
            # BUG FIX: the refpix, linearity and dark results were
            # previously assigned to unused locals (m_ref, m_lin, m_dark),
            # so the jump step ran on the superbias output; chain each
            # step's result through m instead.
            m = RefPixStep.call(m, config_file='refpix.cfg')
            m = LinearityStep.call(m)
            m = DarkCurrentStep.call(m)

            # if threshold is given, use that rejection threshold
            if threshold is not None:
                m = JumpStep.call(m,
                                  output_file=out,
                                  rejection_threshold=threshold)
            else:
                m = JumpStep.call(m, output_file=out)

        # else, run only jump_step
        else:
            if threshold is not None:
                m = JumpStep.call(infile,
                                  output_file=out,
                                  rejection_threshold=threshold)
            else:
                m = JumpStep.call(infile, output_file=out)

        return m
예제 #9
0
def test_side_averaging():
    '''For MIRI data, check that the mean value in the reference pixels is calculated for each amplifier
    using the average of the left and right side reference pixels.'''
    # left columns hold 1.0 and right columns hold 2.0; the step should
    # subtract their mean of 1.5 from the science pixels

    # full-frame MIRI dimensions
    ngroups = 7
    ysize = 1024
    xsize = 1032

    # zero-valued ramp, scaled
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # populate both reference edges for groups after the first
    for cols, value in ((slice(None, 4), 1.0), (slice(1028, None), 2.0)):
        im.data[:, 1:, :, cols] = value

    # flag the reference columns
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction
    out = RefPixStep.call(im)

    # 50 - 1.5 = 48.5
    assert out.data[0, 5, 100, 50] == 48.5
예제 #10
0
def test_above_sigma():
    '''Test that a value greater than 3 sigma above mean of reference pixels is rejected
       in the averaging of the reference pixels to be subtracted.'''

    # full-frame MIRI dimensions
    ngroups = 5
    ysize = 1024
    xsize = 1032

    # scaled ramp model
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # nominal reference values on both edges plus one strong outlier
    for cols, value in ((slice(None, 4), 1.0), (slice(1028, None), 2.0)):
        im.data[:, 1:, :, cols] = value
    im.data[0, 3, 50, 3] = 35.0

    # mark the reference columns on both edges
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction
    out = RefPixStep.call(im)

    # outlier rejected: the subtracted value is still 1.5, so 30 - 1.5 = 28.5
    assert out.data[0, 3, 50, 7] == 28.5
예제 #11
0
파일: bias_monitor.py 프로젝트: ttemim/jwql
    def run_early_pipeline(self,
                           filename,
                           odd_even_rows=False,
                           odd_even_columns=True,
                           use_side_ref_pixels=True,
                           group_scale=False):
        """Run the early jwst pipeline steps (dq_init, saturation,
        superbias, refpix) on an uncalibrated file and write the result.

        Parameters
        ----------
        filename : str
            File on which to run the pipeline steps

        odd_even_rows : bool
            Option to treat odd and even rows separately during refpix step

        odd_even_columns : bools
            Option to treat odd and even columns separately during refpix step

        use_side_ref_pixels : bool
            Option to perform the side refpix correction during refpix step

        group_scale : bool
            Option to rescale pixel values to correct for instances where
            on-board frame averaging did not result in the proper values

        Returns
        -------
        output_filename : str
            The full path to the calibrated file
        """

        output_filename = filename.replace('_uncal', '').replace(
            '.fits', '_superbias_refpix.fits')

        # nothing to do if the calibrated product already exists
        if os.path.isfile(output_filename):
            logging.info('\t{} already exists'.format(output_filename))
            return output_filename

        # optionally rescale groups, then initialize the data quality array
        if group_scale:
            model = GroupScaleStep.call(filename)
            model = DQInitStep.call(model)
        else:
            model = DQInitStep.call(filename)

        # saturation flagging and superbias subtraction
        model = SaturationStep.call(model)
        model = SuperBiasStep.call(model)

        # reference pixel correction, then save the calibrated product
        model = RefPixStep.call(model,
                                odd_even_rows=odd_even_rows,
                                odd_even_columns=odd_even_columns,
                                use_side_ref_pixels=use_side_ref_pixels)
        model.save(output_filename)
        set_permissions(output_filename)

        return output_filename
예제 #12
0
def test_no_odd_even():
    '''Check that odd/even rows are not applied if flag is set to False'''
    # with odd_even_rows=False the odd and even rows are averaged together

    # full-frame MIRI dimensions
    ngroups = 7
    ysize = 1024
    xsize = 1032

    # scaled ramp model
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # odd rows get 1-4 in the reference columns, even rows get 5-8,
    # mirrored on the left (0-3) and right (1028-1031) edges
    odd_rows = slice(1, ysize, 2)
    even_rows = slice(0, ysize - 1, 2)
    for offset in range(4):
        for rows, value in ((odd_rows, offset + 1.0), (even_rows, offset + 5.0)):
            im.data[:, 1:, rows, offset] = value
            im.data[:, 1:, rows, 1028 + offset] = value

    # flag both reference-pixel edges
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction with odd/even row handling disabled
    out = RefPixStep.call(im, odd_even_rows=False)

    # values should be different by amp and not by odd/even row
    # value of data in 5th frame is 50, ref values are subtracted from that
    # odd+even/2 -> (1+5)/2=3, (2+6)/2=4, (3+7)/2=5, (4+8)/2=6
    for row in (100, 101):
        assert out.data[0, 5, row, 4] == 47.0
        assert out.data[0, 5, row, 5] == 46.0
        assert out.data[0, 5, row, 6] == 45.0
        assert out.data[0, 5, row, 7] == 44.0
예제 #13
0
파일: test_refpix.py 프로젝트: rkbarry/jwst
def test_odd_even():
    '''Check that odd/even rows are applied when flag is set'''

    # odd and even rows get distinct reference values, so the subtracted
    # correction must differ between consecutive rows

    # full-frame MIRI dimensions
    ngroups = 10
    ysize = 1024
    xsize = 1032

    # scaled ramp model
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # odd rows carry 1-4 in the reference columns, even rows carry 5-8,
    # mirrored on the left (0-3) and right (1028-1031) edges
    odd = slice(1, ysize, 2)
    even = slice(0, ysize - 1, 2)
    for offset in range(4):
        im.data[:, 1:, odd, offset] = offset + 1.0
        im.data[:, 1:, odd, 1028 + offset] = offset + 1.0
        im.data[:, 1:, even, offset] = offset + 5.0
        im.data[:, 1:, even, 1028 + offset] = offset + 5.0

    # flag the reference columns
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction (odd/even row handling on by default)
    out = RefPixStep.call(im)

    # values should be different by amp and by odd/even row
    # value of data in 5th frame is 50, ref values are subtracted from that
    for col, even_val, odd_val in ((4, 45.0, 49.0), (5, 44.0, 48.0),
                                   (6, 43.0, 47.0), (7, 42.0, 46.0)):
        assert out.data[0, 5, 100, col] == even_val
        assert out.data[0, 5, 101, col] == odd_val
예제 #14
0
def test_no_odd_even():
    '''Check that odd/even rows are not applied if flag is set to False'''
    # odd and even rows are combined when odd_even_rows=False

    # full-frame MIRI dimensions
    ngroups = 10
    ysize = 1024
    xsize = 1032

    # scaled ramp model
    im = make_rampmodel(ngroups, ysize, xsize)
    im.data = im.data * 10

    # per-column reference values: odd rows 1-4, even rows 5-8, mirrored
    # on the left (0-3) and right (1028-1031) edges
    row_values = [(slice(1, ysize, 2), (1.0, 2.0, 3.0, 4.0)),
                  (slice(0, ysize - 1, 2), (5.0, 6.0, 7.0, 8.0))]
    for rows, values in row_values:
        for offset, value in enumerate(values):
            im.data[:, 1:, rows, offset] = value
            im.data[:, 1:, rows, 1028 + offset] = value

    # flag the reference columns
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction with odd/even row handling disabled
    out = RefPixStep.call(im, odd_even_rows=False)

    # values should be different by amp and not by odd/even row
    # value of data in 5th frame is 50, ref values are subtracted from that
    # odd+even/2 -> (1+5)/2=3, (2+6)/2=4, (3+7)/2=5, (4+8)/2=6
    expected = (47.0, 46.0, 45.0, 44.0)
    for row in (100, 101):
        for col, value in zip((4, 5, 6, 7), expected):
            assert out.data[0, 5, row, col] == value
예제 #15
0
파일: test_refpix.py 프로젝트: rkbarry/jwst
def test_firstframe_sub():
    '''For MIR data, check that the first group is subtracted from each group in an integration
    and added back in after the correction.

    Because every group (including the first) carries identical reference
    pixel values, the internal first-frame subtraction leaves zeros in the
    reference pixels, a zero correction is derived, and after the frame is
    restored the output must equal the input exactly.'''

    # full-frame MIRI dimensions
    ngroups = 10
    ysize = 1024
    xsize = 1032

    # zero-valued ramp model
    im = make_rampmodel(ngroups, ysize, xsize)

    # identical reference values in every group, left and right edges
    for offset, value in enumerate((1.0, 2.0, 3.0, 4.0)):
        im.data[:, :, :, offset] = value
        im.data[:, :, :, 1028 + offset] = value

    # flag the reference columns
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction
    outim = RefPixStep.call(im)

    # input and output must match everywhere
    diff = im.data - outim.data
    np.testing.assert_array_equal(
        np.full((1, ngroups, ysize, xsize), 0.0, dtype=float),
        diff,
        err_msg='no changes should be seen in array ')
예제 #16
0
def test_firstframe_sub():

    '''For MIR data, check that the first group is subtracted from each group in an integration
    and added back in after the correction.

    All groups share identical reference pixel values, so after the
    internal first-frame subtraction the reference pixels are zero, the
    derived correction is zero, and the restored output must equal the
    input.'''

    # full-frame MIRI dimensions
    ngroups = 10
    ysize = 1024
    xsize = 1032

    # zero-valued ramp model
    im = make_rampmodel(ngroups, ysize, xsize)

    # same reference values in every group on both edges
    for col, value in ((0, 1.0), (1, 2.0), (2, 3.0), (3, 4.0)):
        im.data[:, :, :, col] = value
        im.data[:, :, :, 1028 + col] = value

    # flag the reference columns
    for cols in (slice(None, 4), slice(1028, None)):
        im.pixeldq[:, cols] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction
    outim = RefPixStep.call(im)

    # no pixel may change
    diff = im.data - outim.data
    np.testing.assert_array_equal(np.full((1, ngroups, ysize, xsize), 0.0, dtype=float),
                                  diff, err_msg='no changes should be seen in array ')
예제 #17
0
    def run(self):
        """Run the refpix step with the zeroth read temporarily removed.

        The zeroth read of each integration is subtracted from every group
        before the pipeline's reference pixel correction and added back in
        afterwards, then the result is saved to ``self.outfile``.
        """
        # check the proposed output name. If it exists, remove it.
        if self.outfile is None:
            # splitext is robust when the input name has no extension
            # (the previous rfind('.')-based slice silently truncated the
            # name in that case, and could match a dot in a directory name)
            self.outfile = os.path.splitext(self.infile)[0] + '_refpixg0.fits'

        if os.path.isfile(self.outfile):
            os.remove(self.outfile)

        # read in data
        ramp = RampModel(self.infile)
        data = ramp.data

        # get data shape
        nint, ngroup, ny, nx = data.shape

        # make a copy of the 0th read
        zero_read = copy.deepcopy(ramp.data[:, 0, :, :])

        # subtract the zeroth read from all reads in place
        # (broadcast over the group axis; equivalent to the per-integration loop)
        data -= zero_read[:, None, :, :]
        ramp.data = data

        # run the SSB pipeline's refpix step
        ramp = RefPixStep.call(ramp,
                               use_side_ref_pixels=True,
                               odd_even_columns=True,
                               odd_even_rows=False,
                               config_file='refpix.cfg')

        # now add the original 0th read back in
        data = ramp.data
        data += zero_read[:, None, :, :]
        ramp.data = data

        # save the result
        ramp.save(self.outfile)
예제 #18
0
def test_each_amp():
    '''Test that each amp is calculated separately using the average of left
     and right pixels'''

    # full-frame MIRI dimensions
    ngroups = 7
    ysize = 1024
    xsize = 1032

    # zero-valued ramp model
    im = make_rampmodel(ngroups, ysize, xsize)

    # identical per-column values on the left (0-3) and right (1028-1031)
    # reference edges, for every group after the first
    for col, value in ((0, 1.0), (1, 2.0), (2, 3.0), (3, 4.0)):
        im.data[:, 1:, :, col] = value
        im.data[:, 1:, :, 1028 + col] = value

    # flag the reference columns
    im.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL']
    im.pixeldq[:, 1028:] = dqflags.pixel['REFERENCE_PIXEL']

    # apply the correction
    out = RefPixStep.call(im)

    # for amp 1, value subtracted should be 1, for amp 2, value should be 2, etc.
    for col, value in zip((4, 5, 6, 7), (4.0, 3.0, 2.0, 1.0)):
        assert out.data[0, 5, 100, col] == value
예제 #19
0
    def run(self):
        '''Main function.

        Derives per-pixel saturation (well depth) values from a list of
        exposures: runs dq_init, superbias and refpix on each file, finds
        the linear regime of each good pixel's ramp, computes the hard
        saturation level and first saturated group, averages the results
        over exposures, and writes the products to FITS files.
        '''

        # Read in list of files to use to calculate saturation.
        files = self.read_listfile(self.listfile)

        # Check that input files have the same readpattern, array size, etc.
        detector, xshape, yshape = self.input_consistency_check(files)
        # NOTE(review): det_short is computed here but not referenced again
        # in this method -- confirm it is still needed
        det_short = str(detector[3:])
        if 'long' in det_short:
            det_short = det_short[0] + '5'

        # Set up arrays to hold output data.
        sat_arr, grp_arr, new_dq, big_dq, tmp_mask = \
                                        self.init_arrays(files, xshape, yshape)

        # Create mask to isolate reference pixels
        # (True marks the science interior; the 4-pixel border stays False)
        tmp_mask[:, 4:-4, 4:-4] = True

        # Set reference pixel values so they don't get used in calculations.
        sat_arr[~tmp_mask] = 1e6
        grp_arr[~tmp_mask] = np.nan
        big_dq[~tmp_mask] = np.uint8(0)

        # Loop over files.
        for file, n in zip(files, np.arange(0, len(files))):

            # Run dq, superbias, refpix steps and save outputs (optional)
            bpm = DQInitStep.call(file)
            sup = SuperBiasStep.call(bpm)
            ref = RefPixStep.call(sup, odd_even_rows=False)

            if self.intermediates:
                sup.save(file[:-5] + '_dq_superbias.fits')
                ref.save(file[:-5] + '_dq_superbias_refpix.fits')

            # Grab the name of the mask file used from the headers
            bpmcalfile = ref.meta.ref_file.mask.name
            if 'crds' in bpmcalfile:
                # resolve CRDS-style names against the local CRDS cache
                jwst = bpmcalfile.find('jwst')
                bpmfile = '/grp/crds/cache/references/jwst/' + bpmcalfile[jwst:]
            else:
                bpmfile = bpmcalfile

            # Get data values
            mask = fits.getdata(bpmfile, 1)
            data = ref.data
            xstart = ref.meta.subarray.xstart
            ystart = ref.meta.subarray.ystart
            # NOTE(review): these are subarray *sizes* used below as end
            # coordinates -- correct only when xstart/ystart are 1; confirm
            xend = ref.meta.subarray.xsize
            yend = ref.meta.subarray.ysize

            # Loop over pixel combinations for given array (no ref pixels).
            for i, j in itertools.product(np.arange(xstart + 3, xend - 4),
                                          np.arange(ystart + 3, yend - 4)):

                # Set values for bad pixels so they don't get used in calculations.
                if mask[j, i] == np.uint8(1):

                    sat_arr[n, j, i] = np.nan
                    grp_arr[n, j, i] = np.nan
                    big_dq[n, j, i] = np.uint8(1)

                else:

                    # Get signal values for each pixel.
                    signal = data[0, :, j, i].astype('float32')

                    # Get linear region early in the ramp with method 1
                    signal_range = self.get_lin_regime(signal, "method1")

                    # If signal_range can't be determined, must be weird ramp
                    if np.shape(signal_range)[0] == 0:

                        # Try again to get linear region with different method
                        signal_range = self.get_lin_regime(signal, "method2")

                        # If that still doesn't work, quit.
                        if np.shape(signal_range)[0] == 0:

                            # mark pixel as unusable (dq value 2)
                            sat_arr[n, j, i] = np.nan
                            grp_arr[n, j, i] = np.nan
                            big_dq[n, j, i] = np.uint8(2)

                        else:
                            hard_sat, first_sat_grp = \
                                self.get_saturation(signal, signal_range)

                            # Save all values.
                            sat_arr[n, j, i] = hard_sat.astype('float32')
                            grp_arr[n, j, i] = first_sat_grp.astype('float32')
                            big_dq[n, j, i] = np.uint8(3)

                    # Otherwise, must be good ramp?
                    elif np.shape(signal_range)[0] > 0:

                        # Get hard saturation.
                        hard_sat, first_sat_grp = \
                            self.get_saturation(signal, signal_range)

                        # Save all saturation values.
                        sat_arr[n, j, i] = hard_sat.astype('float32')
                        grp_arr[n, j, i] = first_sat_grp.astype('float32')

                    # Catch errors
                    else:
                        print('ERROR for pixel ', i, j)
                        sys.exit(0)

        # If each file gave same pixel DQs, make sure output DQ matches
        locs = np.all(big_dq == big_dq[0, :], axis=0)
        new_dq[locs] = big_dq[0][locs]

        # Get statistics for saturation values, averaging over exposures.
        avg, err = self.calc_stats(sat_arr, big_dq)

        # Save saturation values for each exposure to a FITS file
        newhdu = fits.PrimaryHDU(sat_arr)
        newhdulist = fits.HDUList([newhdu])
        grpname = detector + '_'  \
                      + str(DET_NUM[detector])  \
                      + '_WellDepthADU_'  \
                      + str(datetime.date.today())  \
                      + '_beforeAverage.fits'
        newhdulist.writeto(grpname, overwrite=True)

        # Save first saturated groups array to a FITS file
        newhdu = fits.PrimaryHDU(grp_arr)
        newhdulist = fits.HDUList([newhdu])
        grpname = detector + '_'  \
                      + str(DET_NUM[detector])  \
                      + '_WellDepthADU_'  \
                      + str(datetime.date.today())  \
                      + '_firstSatGroup.fits'
        newhdulist.writeto(grpname, overwrite=True)

        # Save averaged saturation values to a FITS file.
        outfilename = detector + '_'  \
                          + str(DET_NUM[detector])  \
                          + '_WellDepthADU_'  \
                          + str(datetime.date.today())  \
                          + '_ssbsaturation_DMSorient.fits'
        outfile = self.save_reffile(avg, err, new_dq, files, outfilename)

        # Save saturation errors, since the pipeline doesn't currently use them
        errhdu = fits.PrimaryHDU(err)
        errhdulist = fits.HDUList([errhdu])
        errname = detector + '_'  \
                          + str(DET_NUM[detector])  \
                          + '_WellDepthADU_'  \
                          + str(datetime.date.today())  \
                          + '_saturationErrors.fits'
        errhdulist.writeto(errname, overwrite=True)

        # reopen the reference file to annotate its primary header
        z = fits.open(outfile)
        z0 = z[0]
        z1 = z[1]
        z2 = z[2]
        z3 = z[3]

        # Add other things that the pipeline doesn't use, but are helpful.
        z0.header['S_DQINIT'] = ('COMPLETE', 'Data Quality Initialization')
        z0.header['S_SUPERB'] = ('COMPLETE', 'Superbias Subtraction')
        z0.header['S_REFPIX'] = ('COMPLETE', 'Reference Pixel Correction')
        newhdu = fits.HDUList([z0, z1, z2, z3])
        newhdu.writeto(outfile, overwrite=True)
예제 #20
0
def test_1overf(cases, sigmas, smoothing_lengths, tolerances, side_gains):
    '''Test amp average and 1/f noise subtraction.

    Runs the pipeline refpix step with only the side-reference-pixel
    (1/f) correction enabled, repeats the same correction manually, and
    asserts that the two results agree to within the given tolerances.

    Parameters
    ----------
    cases : str
        Path to the input ramp FITS file (assumed to end in '.fits').
    sigmas : sequence
        Sigma-clipping thresholds passed to the manual correction.
    smoothing_lengths : int
        Side-pixel smoothing length used by both pipeline and manual runs.
    tolerances : sequence
        Two-element sequence; tolerances[0] is the absolute and
        tolerances[1] the relative tolerance for the final comparison.
    side_gains : float
        Gain applied to the side-reference-pixel correction.
    '''

    test_name = 'only_1overf'

    # pipeline refpix correction results
    # ----------------
    refq = RefPixStep.call(cases,
                           odd_even_columns=False,
                           use_side_ref_pixels=True,
                           side_smoothing_length=smoothing_lengths,
                           side_gain=side_gains,
                           odd_even_rows=False)
    outname = cases[:-5] + '_refpix_only1overf_pipeline.fits'
    refq.save(outname)

    # manual refpix correction results
    # --------------

    # read in input file
    ramp = RampModel(cases)
    rampdata = np.copy(ramp.data)
    pixeldq = ramp.pixeldq
    goodpix = pixeldq == 0

    # extract subarray if necessary
    xs, xe, ys, ye = get_coords_rampmodel(ramp)

    # number of reference-pixel rows/columns on each edge of the full
    # detector that fall within this (sub)array
    num_left = np.max([4 - xs, 0])
    num_right = np.max([xe - 2044, 0])
    num_bottom = np.max([4 - ys, 0])
    num_top = np.max([ye - 2044, 0])

    # do the manual subtraction
    refq_manual, table = amp_only_refpix_corr(rampdata, sigmas, num_left,
                                              num_right, num_bottom, num_top,
                                              goodpix)
    refq_manual, outtable = include_1overf(refq_manual, sigmas, num_left,
                                           num_right, num_bottom, num_top,
                                           pixeldq, smoothing_lengths,
                                           side_gains)

    # save table to compare groups and amps
    save_df_table(outtable, cases[:-5] + '_include_1overf_amponly.dat')

    # compare manual to pipeline
    diff = refq_manual - refq.data

    # save an image of the differences between manual and pipeline subtraction
    images = RampModel()
    images.data = diff
    images.save(cases[:-5] + '_refpix_only1overf_differences.fits',
                overwrite=True)

    # check some values
    print("Group, Diffs: Amp1 through Amp4")
    for i in np.arange(0, diff.shape[1]):
        print('')
        print('Pipeline: ' + str(i) + ',' + str(refq.data[0, i, 12, 12]) +
              ',' + str(refq.data[0, i, 12, 600]) + ',' +
              str(refq.data[0, i, 12, 1030]) + ',' +
              str(refq.data[0, i, 12, 1600]))
        print('Manual: ' + str(i) + ',' + str(refq_manual[0, i, 12, 12]) +
              ',' + str(refq_manual[0, i, 12, 600]) + ',' +
              str(refq_manual[0, i, 12, 1030]) + ',' +
              str(refq_manual[0, i, 12, 1600]))

    # save out data
    outramp = RampModel()
    outramp.data = refq_manual
    outramp.save(cases[:-5] + '_refpix_only1overf_manual.fits', overwrite=True)

    # Evaluate the comparison once — the original computed np.allclose
    # twice and compared the result with `== False` / `== True`.
    is_close = np.allclose(refq_manual,
                           refq.data,
                           rtol=tolerances[1],
                           atol=tolerances[0],
                           equal_nan=True)
    if not is_close:
        # if test fails, get image of (manual - pipeline) for each group
        display_multi_image(diff, np.shape(diff)[1], tolerances[1], test_name)

        print('')
        print("Group, Max Difference")
        for i in np.arange(0, diff.shape[1]):
            print('Max difference between pipeline and manual: ' + str(i) +
                  ',' + str(np.max(np.absolute(diff[0, i, :, :]))))
        print('')

    # pytest to make sure pipeline ~= manual
    assert is_close
예제 #21
0
def test_parameters_from_crds_fail():
    """Verify that no step parameters are retrieved from CRDS
    when the instrument does not match the data file."""
    model_path = t_path(join('data', 'miri_data.fits'))
    with datamodels.open(model_path) as data:
        # Force an instrument name that contradicts the MIRI data file.
        data.meta.instrument.name = 'NIRSPEC'
        pars = RefPixStep.get_config_from_reference(data)
        assert len(pars) == 0
예제 #22
0
    def linearize_dark(self, darkobj):
        """Beginning with the input dark current ramp, run the dq_init, saturation, superbias
        subtraction, refpix and nonlin pipeline steps in order to produce a linearized
        version of the ramp. This will be used when combining the dark ramp with the
        simulated signal ramp.

        Parameters
        -----------
        darkobj : obj
            Instance of read_fits class containing dark current data and info

        Returns
        -------
        linDarkObj : obj
            Modified read_fits instance with linearized dark current data
        """
        # Imports are deferred so the jwst pipeline is only required when
        # this method is actually used.
        from jwst.dq_init import DQInitStep
        from jwst.saturation import SaturationStep
        from jwst.superbias import SuperBiasStep
        from jwst.refpix import RefPixStep
        from jwst.linearity import LinearityStep

        # First we need to place the read_fits object into a RampModel instance
        if self.runStep['linearized_darkfile']:
            subfile = self.params['Reffiles']['linearized_darkfile']
        else:
            subfile = self.params['Reffiles']['dark']
        dark = darkobj.insert_into_datamodel(subfile)

        print('Creating a linearized version of the dark current input ramp')
        print('using JWST calibration pipeline.')

        # Run the DQ_Init step; use the bad pixel mask override if provided
        if self.runStep['badpixmask']:
            linDark = DQInitStep.call(dark,
                                      config_file=self.params['newRamp']['dq_configfile'],
                                      override_mask=self.params['Reffiles']['badpixmask'])
        else:
            linDark = DQInitStep.call(dark, config_file=self.params['newRamp']['dq_configfile'])

        # If the saturation map is provided, use it. If not, default to whatever is in CRDS
        if self.runStep['saturation_lin_limit']:
            linDark = SaturationStep.call(linDark,
                                          config_file=self.params['newRamp']['sat_configfile'],
                                          override_saturation=self.params['Reffiles']['saturation'])
        else:
            linDark = SaturationStep.call(linDark,
                                          config_file=self.params['newRamp']['sat_configfile'])

        # If the superbias file is provided, use it. If not, default to whatever is in CRDS
        if self.runStep['superbias']:
            linDark = SuperBiasStep.call(linDark,
                                         config_file=self.params['newRamp']['superbias_configfile'],
                                         override_superbias=self.params['Reffiles']['superbias'])
        else:
            linDark = SuperBiasStep.call(linDark,
                                         config_file=self.params['newRamp']['superbias_configfile'])

        # Reference pixel correction
        linDark = RefPixStep.call(linDark,
                                  config_file=self.params['newRamp']['refpix_configfile'])

        # Save a copy of the superbias- and reference pixel-subtracted
        # dark. This will be used later to add these effects back in
        # after the synthetic signals have been added and the non-linearity
        # effects are added back in when using the PROPER combine method.
        sbAndRefpixEffects = dark.data - linDark.data

        # Linearity correction - save the output so that you won't need to
        # re-run the pipeline when using the same dark current file in the
        # future. Use the linearity coefficient file if provided
        base_name = self.params['Output']['file'].split('/')[-1]
        # NOTE(review): assumes the output file name ends in '.fits'
        # (5 characters stripped) — confirm against the config schema.
        linearoutfile = base_name[0:-5] + '_linearized_dark_current_ramp.fits'
        linearoutfile = os.path.join(self.params['Output']['directory'], linearoutfile)
        if self.runStep['linearity']:
            linDark = LinearityStep.call(linDark,
                                         config_file=self.params['newRamp']['linear_configfile'],
                                         override_linearity=self.params['Reffiles']['linearity'],
                                         output_file=linearoutfile)
        else:
            linDark = LinearityStep.call(linDark,
                                         config_file=self.params['newRamp']['linear_configfile'],
                                         output_file=linearoutfile)

        # Fixed: the original message had an unbalanced parenthesis
        # ("...pipeline saved as {}").
        print("Linearized dark (output directly from pipeline) saved as {}"
              .format(linearoutfile))

        # Now we need to put the data back into a read_fits object
        linDarkobj = read_fits.Read_fits()
        linDarkobj.model = linDark
        linDarkobj.rampmodel_to_obj()
        linDarkobj.sbAndRefpix = sbAndRefpixEffects

        return linDarkobj
예제 #23
0
def test_refpix_step(fits_input):
    """Make sure the RefPixStep runs without error on the input file."""

    RefPixStep.call(datamodels.open(fits_input), save_results=True)
예제 #24
0
        if len(line) > 3:
            files.append(line.strip())

# Assume the input files have already been run through the bad-pixel-mask
# (dq_init) and saturation steps, so the remaining processing is superbias
# subtraction, refpix correction, linearity correction, and the jump step.

# NOTE(review): reference files are hard-wired for detector NRCA1 (CV3
# test data); update these paths before running on any other detector.
reffile_dir = '/ifs/jwst/wit/witserv/data7/nrc/reference_files/SSB/CV3/cv3_reffile_conversion/'
sbfile = '/ifs/jwst/wit/witserv/data4/nrc/hilbert/superbias/cv3/A1/A1_superbias_from_list_of_biasfiles.list.fits'
linfile = reffile_dir + 'linearity/NRCA1_17004_LinearityCoeff_ADU0_2016-05-14_ssblinearity_DMSorient.fits'
gainfile = reffile_dir + 'gain/NRCA1_17004_Gain_ISIMCV3_2016-01-23_ssbgain_DMSorient.fits'
ronfile = '/grp/jwst/wit/nircam/reference_files/SSB/CV2/delivery_Dec_2015/Read_Noise/NRCA1_16989_CDSNoise_2014-10-24_ssbreadnoise_DMSorient.fits'

# Run each file through the detector1 front-end steps, saving each
# intermediate product with a suffix describing the step applied.
for file in files:
    # Superbias subtraction using the hard-wired override file.
    sbout = file[0:-5] + '_superbias.fits'
    data = SuperBiasStep.call(file,
                              override_superbias=sbfile,
                              output_file=sbout)
    # Reference pixel correction (no overrides; CRDS defaults).
    refout = sbout[0:-5] + '_refpix.fits'
    data = RefPixStep.call(data, output_file=refout)
    # Linearity correction with the hard-wired coefficient file.
    linout = refout[0:-5] + '_linearity.fits'
    data = LinearityStep.call(data,
                              override_linearity=linfile,
                              output_file=linout)
    # Jump detection with hard-wired gain and read-noise files.
    # NOTE(review): `data` is not used after this call — the pipeline is
    # re-run from scratch below via the config-file route; confirm this
    # duplication is intentional.
    jumpout = linout[0:-5] + '_jump.fits'
    data = JumpStep.call(data,
                         override_gain=gainfile,
                         override_readnoise=ronfile,
                         output_file=jumpout)
    # NOTE(review): `det` is read here before any visible assignment — it
    # must be defined earlier in the file; otherwise this raises NameError.
    det = det.lower()[3:]
    if 'long' in det:
        det = det[0] + '5'
    # NOTE(review): `reffiles` is not defined in this view — confirm it is
    # populated earlier in the file.
    refdict = reffiles[det]

    #m = calwebb_detector1.Detector1Pipeline(config_file='calwebb_detector1.cfg')
    #m.saturation.override_saturation = satdir+refdict['saturation']
    #m.superbias.override_superbias = sbdir+refdict['superbias']
    #m.refpix.odd_even_rows = False
    #m.group_scale.skip = True
    #m.ipc.skip = True
    #m.rscd.skip = True
    #m.lastframe.skip = True
    #m.dark_current.skip = True
    #m.persistence.skip = True
    #m.jump.skip = True
    #m.ramp_fit.skip = False #bug in pipeline means this must
    #be run. 
    #m.linearity.override_linearity = lindir+refdict['linearity']
    #m.output_file = outfile
    #m.run(file)

    # Step-by-step equivalent of the commented-out full pipeline above,
    # driven by local .cfg files.
    m = DQInitStep.call(file,config_file = 'dq_init.cfg')
    m = SaturationStep.call(m,config_file = 'saturation.cfg')
    m = SuperBiasStep.call(m,config_file = 'superbias.cfg')
    m = RefPixStep.call(m,config_file='refpix.cfg')
    # NOTE(review): `outfile` is not defined in this view — confirm it is
    # set earlier in the file.
    m = LinearityStep.call(m,config_file='linearity.cfg',output_file = outfile)