Example #1
def test_stringify():
    im = DataModel()
    assert str(im) == '<DataModel>'

    im = ImageModel((10, 100))
    assert str(im) == '<ImageModel(10, 100)>'

    image = os.path.join(ROOT_DIR, "nircam_mask.fits")
    with MaskModel(image) as im:
        assert str(im) == '<MaskModel(2048, 2048) from nircam_mask.fits>'
Example #2
def test_stringify():
    im = DataModel()
    assert str(im) == '<DataModel>'
    im.close()

    im = ImageModel((10, 100))
    assert str(im) == '<ImageModel(10, 100)>'
    im.close()

    image = ROOT_DIR + "/nircam_mask.fits"
    im = MaskModel(image)
    assert str(im) == '<MaskModel(2048, 2048) from nircam_mask.fits>'
    im.close()
Example #3
def test_dq_im(xstart, ystart, xsize, ysize, nints, ngroups, instrument,
               exp_type):
    """ Check that PIXELDQ is initialized with the information from the reference file.
    test that a flagged value in the reference file flags the PIXELDQ array"""

    # create raw input data for step
    dm_ramp = make_rawramp(instrument, nints, ngroups, ysize, xsize, ystart,
                           xstart, exp_type)

    # create a MaskModel for the dq input mask
    dq, dq_def = make_maskmodel(ysize, xsize)

    # edit reference file with known bad pixel values
    dq[100, 100] = 2  # Dead pixel
    dq[200, 100] = 4  # Hot pixel
    dq[300, 100] = 8  # Unreliable_slope
    dq[400, 100] = 16  # RC
    dq[500, 100] = 1  # Do_not_use
    dq[100, 200] = 3  # Dead pixel + do not use
    dq[200, 200] = 5  # Hot pixel + do not use
    dq[300, 200] = 9  # Unreliable slope + do not use
    dq[400, 200] = 17  # RC + do not use

    # write mask model
    ref_data = MaskModel(dq=dq, dq_def=dq_def)
    ref_data.meta.instrument.name = instrument
    ref_data.meta.subarray.xstart = xstart
    ref_data.meta.subarray.xsize = xsize
    ref_data.meta.subarray.ystart = ystart
    ref_data.meta.subarray.ysize = ysize

    # run do_dqinit
    outfile = do_dqinit(dm_ramp, ref_data)

    if instrument == "FGS":
        dqdata = outfile.dq
    else:
        dqdata = outfile.pixeldq

    # assert that the pixels read back in match the mapping from ref data to science data
    assert (dqdata[100, 100] == dqflags.pixel['DEAD'])
    assert (dqdata[200, 100] == dqflags.pixel['HOT'])
    assert (dqdata[300, 100] == dqflags.pixel['UNRELIABLE_SLOPE'])
    assert (dqdata[400, 100] == dqflags.pixel['RC'])
    assert (dqdata[500, 100] == dqflags.pixel['DO_NOT_USE'])
    assert (dqdata[100, 200] == 1025)      # DEAD + DO_NOT_USE (1024 + 1)
    assert (dqdata[200, 200] == 2049)      # HOT + DO_NOT_USE (2048 + 1)
    assert (dqdata[300, 200] == 16777217)  # UNRELIABLE_SLOPE + DO_NOT_USE (16777216 + 1)
    assert (dqdata[400, 200] == 16385)     # RC + DO_NOT_USE (16384 + 1)
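
The combined values in the last four asserts are just bitwise ORs of the standard JWST pixel flags with DO_NOT_USE. A minimal sketch of that arithmetic, assuming dqflags still imports from jwst.datamodels (newer releases moved it to stdatamodels):

from jwst.datamodels import dqflags

# Each reference-file bit (2, 4, 8, 16) maps to a standard pixel flag, and
# the reference file's DO_NOT_USE bit (1) ORs in the DO_NOT_USE pixel flag
# (also 1) in the output PIXELDQ.
assert dqflags.pixel['DEAD'] | dqflags.pixel['DO_NOT_USE'] == 1025
assert dqflags.pixel['HOT'] | dqflags.pixel['DO_NOT_USE'] == 2049
assert dqflags.pixel['UNRELIABLE_SLOPE'] | dqflags.pixel['DO_NOT_USE'] == 16777217
assert dqflags.pixel['RC'] | dqflags.pixel['DO_NOT_USE'] == 16385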
Example #4
def test_stringify(tmpdir):
    im = DataModel()
    assert str(im) == '<DataModel>'

    im = ImageModel((10, 100))
    assert str(im) == '<ImageModel(10, 100)>'

    path = str(tmpdir.join("nircam_mask.fits"))
    m = MaskModel((2048, 2048))
    m.save(path)
    m.close()
    with MaskModel(path) as im:
        assert str(im) == '<MaskModel(2048, 2048) from nircam_mask.fits>'
Example #5
def make_maskmodel(ysize, xsize):
    # create a mask model for the dq_init step
    csize = (ysize, xsize)
    dq = np.zeros(csize, dtype=int)
    # define a dq_def extension
    mask = MaskModel()

    dqdef = [(0, 1, 'DO_NOT_USE', 'Bad Pixel do not use'),
             (1, 2, 'DEAD', 'Dead Pixel'), (2, 4, 'HOT', 'Hot pixel'),
             (3, 8, 'UNRELIABLE_SLOPE', 'Large slope variance'),
             (4, 16, 'RC', 'RC pixel'),
             (5, 32, 'REFERENCE_PIXEL', 'Reference Pixel')]

    dq_def = np.array(dqdef, dtype=mask.dq_def.dtype)

    return dq, dq_def
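
A hypothetical usage sketch for make_maskmodel: the dq_def bit values are meant to be OR-combined, which is where composite reference-file values like 3 (DEAD + DO_NOT_USE) in the other examples come from.

# Hypothetical usage: build a small mask and OR two flags onto one pixel.
dq, dq_def = make_maskmodel(32, 32)
dq[4, 4] = 2 | 1  # DEAD + DO_NOT_USE = 3, per the dq_def bits above
ref = MaskModel(dq=dq, dq_def=dq_def)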
Example #6
def test_dq_add1_groupdq():
    """
    Test if the dq_init code set the groupdq flag on the first
    group to 'do_not_use' by adding 1 to the flag, not overwriting to 1
    Also test whether two flags on the same pixel are added together.
    """

    # size of integration
    nints = 1
    ngroups = 5
    xsize = 1032
    ysize = 1024

    # create raw input data for step
    dm_ramp = make_rampmodel(nints, ngroups, ysize, xsize)

    # create a MaskModel for the dq input mask
    dq, dq_def = make_maskmodel(ysize, xsize)

    # write reference file with known bad pixel values

    dq[505, 505] = 1  # Do_not_use
    dq[400, 500] = 3  # do_not_use and dead pixel

    # write mask model
    ref_data = MaskModel(dq=dq, dq_def=dq_def)
    ref_data.meta.instrument.name = 'MIRI'
    ref_data.meta.subarray.xstart = 1
    ref_data.meta.subarray.xsize = xsize
    ref_data.meta.subarray.ystart = 1
    ref_data.meta.subarray.ysize = ysize

    # set a flag in the pixel dq
    dm_ramp.pixeldq[505, 505] = 4

    # run correction step
    outfile = do_dqinit(dm_ramp, ref_data)

    # test if pixels in pixeldq were incremented in value by 1
    # check that the previous dq flag is added to the mask value
    assert (outfile.pixeldq[505, 505] == 5)
    # check that two flags propagate correctly
    assert (outfile.pixeldq[400, 500] == 1025)
Example #7
    def mkbpm(self,
              dark1filename,
              dark2filename,
              flat1filename,
              flat2filename,
              whateveroption1=False,
              whateveroption2=2.0):

        print('HELLO mkbpm in myexamplescript.py!!!')

        # create an empty MaskModel for the bad pixel mask
        xstart = 1
        ystart = 1
        xsize = 2048
        ysize = 2048

        dq, dq_def = make_maskmodel(ysize, xsize)

        # write mask model and fix the meta data
        mask_model = MaskModel(dq=dq, dq_def=dq_def)

        return mask_model
Example #8
def test_groupdq():
    """Check that GROUPDQ extension is added to the data and all values are initialized to zero."""

    # size of integration
    instrument = 'MIRI'
    nints = 1
    ngroups = 5
    xsize = 1032
    ysize = 1024
    xstart = 1
    ystart = 1

    # create raw input data for step
    dm_ramp = make_rawramp(instrument, nints, ngroups, ysize, xsize, ystart,
                           xstart)

    # create a MaskModel for the dq input mask
    dq, dq_def = make_maskmodel(ysize, xsize)

    # write mask model
    ref_data = MaskModel(dq=dq, dq_def=dq_def)
    ref_data.meta.instrument.name = instrument
    ref_data.meta.subarray.xstart = xstart
    ref_data.meta.subarray.xsize = xsize
    ref_data.meta.subarray.ystart = ystart
    ref_data.meta.subarray.ysize = ysize

    # run the correction step
    outfile = do_dqinit(dm_ramp, ref_data)

    # check that GROUPDQ was created and initialized to zero
    groupdq = outfile.groupdq

    np.testing.assert_array_equal(np.full((1, ngroups, ysize, xsize),
                                          0,
                                          dtype=int),
                                  groupdq,
                                  err_msg='groupdq not initialized to zero')
Example #9
def test_err():
    """Check that a 4-D ERR array is initialized and all values are zero."""

    # size of integration
    instrument = 'MIRI'
    nints = 1
    ngroups = 5
    xsize = 1032
    ysize = 1024
    xstart = 1
    ystart = 1

    # create raw input data for step
    dm_ramp = make_rawramp(instrument, nints, ngroups, ysize, xsize, ystart,
                           xstart)

    # create a MaskModel for the dq input mask
    dq, dq_def = make_maskmodel(ysize, xsize)

    # write mask model
    ref_data = MaskModel(dq=dq, dq_def=dq_def)
    ref_data.meta.instrument.name = instrument
    ref_data.meta.subarray.xstart = xstart
    ref_data.meta.subarray.xsize = xsize
    ref_data.meta.subarray.ystart = ystart
    ref_data.meta.subarray.ysize = ysize

    # Filter out validation warnings from ref_data
    warnings.filterwarnings("ignore", category=ValidationWarning)

    # run correction step
    outfile = do_dqinit(dm_ramp, ref_data)

    # check that ERR array was created and initialized to zero
    errarr = outfile.err

    assert (errarr.ndim == 4)  # check that output err array is 4-D
    assert (np.all(errarr == 0))  # check that values are 0
Example #10
def test_mask_model():
    with MaskModel((10, 10)) as dm:
        assert dm.dq.dtype == np.uint32
Example #11
def test_dq_subarray():
    """Test that the pipeline properly extracts the subarray from the reference file."""
    # put dq flags in specific pixels and make sure they match in the output subarray file

    # create input data
    # create model of data with 0 value array
    ngroups = 50
    ysize = 224
    xsize = 288
    fullxsize = 1032
    fullysize = 1024

    # create the data and groupdq arrays
    csize = (1, ngroups, ysize, xsize)
    data = np.full(csize, 1.0)
    pixeldq = np.zeros((ysize, xsize), dtype=int)
    groupdq = np.zeros(csize, dtype=int)

    # create a JWST datamodel for MIRI data
    im = MIRIRampModel(data=data, pixeldq=pixeldq, groupdq=groupdq)

    im.meta.instrument.name = 'MIRI'
    im.meta.instrument.detector = 'MIRIMAGE'
    im.meta.instrument.filter = 'F1500W'
    im.meta.instrument.band = 'N/A'
    im.meta.observation.date = '2016-06-01'
    im.meta.observation.time = '00:00:00'
    im.meta.exposure.type = 'MIR_IMAGE'
    im.meta.subarray.name = 'MASK1550'
    im.meta.subarray.xstart = 1
    im.meta.subarray.xsize = xsize
    im.meta.subarray.ystart = 467
    im.meta.subarray.ysize = ysize

    # create full size mask model
    dq, dq_def = make_maskmodel(fullysize, fullxsize)

    # place dq flags in dq array that would be in subarray
    # MASK1550 file has colstart=1, rowstart=467
    dq[542, 100] = 2  # Dead pixel
    dq[550, 100] = 1  # Do_not_use
    dq[580, 80] = 4   # Hot pixel

    # write mask model
    ref_data = MaskModel(dq=dq, dq_def=dq_def)
    ref_data.meta.instrument.name = 'MIRI'
    ref_data.meta.subarray.xstart = 1
    ref_data.meta.subarray.xsize = fullxsize
    ref_data.meta.subarray.ystart = 1
    ref_data.meta.subarray.ysize = fullysize

    # Filter out validation warnings from ref_data
    warnings.filterwarnings("ignore", category=ValidationWarning)

    # run correction step
    outfile = do_dqinit(im, ref_data)

    # read dq array
    outpixdq = outfile.pixeldq

    # check for dq flag in pixeldq of subarray image
    assert (outpixdq[76, 100] == 1024)  # DEAD
    assert (outpixdq[84, 100] == 1)     # DO_NOT_USE
    assert (outpixdq[114, 80] == 2048)  # check that pixel was flagged HOT
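The row indices in these asserts follow from the subarray extraction arithmetic; a small standalone sketch, using the values from the test above:

# Full-frame reference-file coordinates map into the subarray as
# sub_row = full_row - (ystart - 1), and likewise for columns with xstart.
# Here the science data has ystart = 467 and xstart = 1.
ystart, xstart = 467, 1
for full_row, full_col in [(542, 100), (550, 100), (580, 80)]:
    sub_row = full_row - (ystart - 1)
    sub_col = full_col - (xstart - 1)
    print((full_row, full_col), '->', (sub_row, sub_col))
# rows 542, 550, 580 map to 76, 84, 114, matching the asserts above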
Example #12
def save_final_map(bad_pix_map, instrument, detector, hdulist, files, author,
                   description, pedigree, useafter, history_text, outfile):
    """Save a bad pixel map into a CRDS-formatted reference file

    Parameters
    ----------
    bad_pix_map : numpy.ndarray
        2D bad pixel array

    instrument : str
        Name of instrument associated with the bad pixel array

    detector : str
        Name of detector associated with the bad pixel array

    hdulist : astropy.io.fits.HDUList
        HDUList containing "extra" fits keywords

    files : list
        List of files used to create ``bad_pix_map``

    author : str
        Author of the bad pixel mask reference file

    description : str
        CRDS description to use in the final bad pixel file

    pedigree : str
        CRDS pedigree to use in the final bad pixel file

    useafter : str
        CRDS useafter string for the bad pixel file

    history_text : list
        List of strings to add as HISTORY entries to the bad pixel file

    outfile : str
        Name of the output bad pixel file
    """
    yd, xd = bad_pix_map.shape

    # Initialize the MaskModel using the hdu_list, so the new keywords will
    # be populated
    model = MaskModel(hdulist)
    model.dq = bad_pix_map

    # Create dq_def data
    dq_def = badpix_from_flats.create_dqdef()
    model.dq_def = dq_def
    model.meta.reftype = 'MASK'
    model.meta.subarray.name = 'FULL'
    model.meta.subarray.xstart = 1
    model.meta.subarray.xsize = xd
    model.meta.subarray.ystart = 1
    model.meta.subarray.ysize = yd
    model.meta.instrument.name = instrument.upper()
    model.meta.instrument.detector = detector

    # Get the fast and slow axis directions from one of the input files
    fastaxis, slowaxis = badpix_from_flats.get_fastaxis(files[0])
    model.meta.subarray.fastaxis = fastaxis
    model.meta.subarray.slowaxis = slowaxis

    model.meta.author = author
    model.meta.description = description
    model.meta.pedigree = pedigree
    model.meta.useafter = useafter

    # Add information about parameters used
    # Parameters from badpix_from_flats
    package_note = (
        'This file was created using the bad_pixel_mask.py module within the '
        'jwst_reffiles package.')

    software_dict = {
        'name': 'jwst_reffiles.bad_pixel_mask.bad_pixel_mask.py',
        'author': 'STScI',
        'homepage': 'https://github.com/spacetelescope/jwst_reffiles',
        'version': '0.0.0'
    }
    entry = util.create_history_entry(package_note, software=software_dict)
    model.history.append(entry)

    model.history.append(
        util.create_history_entry('Parameter values and descriptions:'))
    dead_search_descrip = (
        'dead_search: Boolean, whether or not to run the dead pixel search '
        'using flat field files. The value is stored in the {} keyword.'.
        format(dead_search_kw))
    model.history.append(util.create_history_entry(dead_search_descrip))

    low_qe_search_descrip = (
        'low_qe_and_open_search: Boolean, whether or not to run the low QE '
        'and open pixel search using flat field files. The value is stored in the {} '
        'keyword.'.format(low_qe_search_kw))
    model.history.append(util.create_history_entry(low_qe_search_descrip))

    dead_type_descrip = (
        'dead_search_type: Method used to identify dead pixels. The value is stored in the '
        '{} keyword.'.format(dead_search_type_kw))
    model.history.append(util.create_history_entry(dead_type_descrip))

    sigma_descrip = (
        'flat_mean_sigma_threshold: Number of standard deviations to use when sigma-clipping to '
        'calculate the mean slope image or the mean across the detector. The value '
        'used is stored in the {} keyword.'.format(mean_sig_threshold_kw))
    model.history.append(util.create_history_entry(sigma_descrip))

    norm_descrip = (
        'flat_mean_normalization_method: Specify how the mean image is normalized prior to searching '
        'for bad pixels. The value used is stored in the {} keyword.'.format(
            norm_method_kw))
    model.history.append(util.create_history_entry(norm_descrip))

    smooth_descrip = (
        'smoothing_box_width: Width in pixels of the box kernel to use to compute the '
        'smoothed mean image. The value used is stored in the {} keyword.'.
        format(smooth_box_width_kw))
    model.history.append(util.create_history_entry(smooth_descrip))

    smooth_type_descrip = (
        'smoothing_type: Type of smoothing to do: Box2D or median filtering. The value used '
        'is stored in the {} keyword.'.format(smoothing_type_kw))
    model.history.append(util.create_history_entry(smooth_type_descrip))

    dead_sig_descrip = (
        'Number of standard deviations below the mean at which a pixel is considered dead. '
        'The value used is stored in the {} keyword.'.format(
            dead_sig_thresh_kw))
    model.history.append(util.create_history_entry(dead_sig_descrip))

    max_dead_descrip = (
        'Maximum normalized signal rate of a pixel that is considered dead. The value '
        'used is stored in the {} keyword.'.format(max_dead_sig_kw))
    model.history.append(util.create_history_entry(max_dead_descrip))

    run_dead_flux_descrip = (
        'run_dead_flux_check: Boolean, if True, search for pixels erroneously flagged '
        'as dead because they are saturated in all groups. The value used is stored '
        'in the {} keyword.'.format(dead_flux_check_kw))
    model.history.append(util.create_history_entry(run_dead_flux_descrip))

    dead_flux_limit_descrip = (
        'Signal limit in raw data above which the pixel is considered not dead. The '
        'value used is stored in the {} keyword.'.format(max_dead_sig_kw))
    model.history.append(util.create_history_entry(dead_flux_limit_descrip))

    max_low_qe_descrip = (
        'The maximum normalized signal a pixel can have and be considered low QE. The '
        'value used is stored in the {} keyword.'.format(max_low_qe_kw))
    model.history.append(util.create_history_entry(max_low_qe_descrip))

    max_open_adj_descrip = (
        'The maximum normalized signal a pixel adjacent to a low QE pixel can have '
        'in order for the low QE pixel to be reclassified as OPEN. The value used '
        'is stored in the {} keyword.'.format(max_open_adj_kw))
    model.history.append(util.create_history_entry(max_open_adj_descrip))

    flat_do_not_use_descrip = (
        'List of bad pixel types (from flats) where the DO_NOT_USE flag is also applied. '
        'The values used are stored in the {} keyword.'.format(
            flat_do_not_use_kw))
    model.history.append(util.create_history_entry(flat_do_not_use_descrip))

    manual_file_descrip = (
        'Name of the ascii file containing a list of pixels to be added manually. The '
        'value used is stored in the {} keyword.'.format(manual_flag_kw))
    model.history.append(util.create_history_entry(manual_file_descrip))

    # Parameters from badpix_from_darks
    bad_from_dark_descrip = (
        'badpix_from_dark: Boolean, whether or not the bad pixel from dark search '
        'has been run. The value is stored in the {} keyword.'.format(
            bad_from_dark_kw))
    model.history.append(util.create_history_entry(bad_from_dark_descrip))

    dark_clip_sig_descrip = (
        'Number of sigma to use when sigma-clipping 2D stdev image. The value used '
        'is stored in the {} keyword.'.format(dark_clip_sigma_kw))
    model.history.append(util.create_history_entry(dark_clip_sig_descrip))

    dark_clip_iter_descrip = (
        'Max number of iterations to use when sigma clipping mean and stdev values. '
        'The value used is stored in the {} keyword.'.format(
            dark_clip_iters_kw))
    model.history.append(util.create_history_entry(dark_clip_iter_descrip))

    dark_noisy_thresh_descrip = (
        'Number of sigma above mean noise for noisy pix threshold. The value '
        'used is stored in the {} keyword.'.format(dark_noisy_thresh_kw))
    model.history.append(util.create_history_entry(dark_noisy_thresh_descrip))

    max_sat_frac_descrip = (
        'Fraction of integrations within which a pixel must be fully saturated before '
        'flagging it as HOT. The value used is stored in the {} keyword.'.
        format(max_sat_frac_kw))
    model.history.append(util.create_history_entry(max_sat_frac_descrip))

    jump_limit_descrip = (
        'Maximum number of jumps a pixel can have in an integration before it is flagged as a '
        '"high jump" pixel. The value used is stored in the {} keyword.'.
        format(jump_limit_kw))
    model.history.append(util.create_history_entry(jump_limit_descrip))

    jump_ratio_descrip = (
        'Cutoff for the ratio of jumps early in the ramp to jumps later in the ramp when '
        'looking for RC pixels. The value used is stored in the {} keyword.'.
        format(jump_ratio_thresh_kw))
    model.history.append(util.create_history_entry(jump_ratio_descrip))

    cutoff_frac_descrip = (
        'Fraction of the integration to use when comparing the jump rate early in the integration to '
        'that across the entire integration. The value used is stored in the {} keyword.'
        .format(cutoff_frac_kw))
    model.history.append(util.create_history_entry(cutoff_frac_descrip))

    ped_sigma_descrip = (
        'Pixels with pedestal values more than this limit above the mean are flagged as RC. '
        'The value used is stored in the {} keyword.'.format(
            pedestal_sig_thresh_kw))
    model.history.append(util.create_history_entry(ped_sigma_descrip))

    rc_thresh_descrip = (
        'Fraction of input files within which a pixel must be identified as an RC pixel before '
        'it will be flagged as a permanent RC pixel. The value used is stored in the {} '
        'keyword.'.format(rc_frac_thresh_kw))
    model.history.append(util.create_history_entry(rc_thresh_descrip))

    low_ped_descrip = (
        'Fraction of input files within which a pixel must be identified as a low pedestal '
        'pixel before it will be flagged as a permanent low pedestal pixel. The value used '
        'is stored in the {} keyword.'.format(low_ped_frac_kw))
    model.history.append(util.create_history_entry(low_ped_descrip))

    high_cr_descrip = (
        'Fraction of input files within which a pixel must be flagged as having a high number '
        'of jumps before it will be flagged as permanently noisy. The value used '
        'is stored in the {} keyword.'.format(high_cr_frac_kw))
    model.history.append(util.create_history_entry(high_cr_descrip))

    dark_do_not_use_descrip = (
        'List of bad pixel types (from darks) where the DO_NOT_USE flag is also applied. '
        'The values used are stored in the {} keyword.'.format(
            dark_do_not_use_kw))
    model.history.append(util.create_history_entry(dark_do_not_use_descrip))

    # Add the list of input files used to create the map
    model.history.append(util.create_history_entry('DATA USED:'))
    for file in files:
        totlen = len(file)
        div = np.arange(0, totlen, 60)
        for val in div:
            if totlen > (val + 60):
                model.history.append(
                    util.create_history_entry(file[val:val + 60]))
            else:
                model.history.append(util.create_history_entry(file[val:]))

    # Add the do not use lists, pixel flag mappings, and user-provided
    # history text
    for history_entry in history_text:
        if history_entry != '':
            model.history.append(util.create_history_entry(history_entry))

    model.save(outfile, overwrite=True)
    print('Final bad pixel mask reference file saved to: {}'.format(outfile))
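
A hypothetical call sketch for this save_final_map: every file name and keyword value below is a placeholder (the dark/flat files would have to exist for get_fastaxis to read one), and the "extra" HDUList can be built the way the next example does.

import numpy as np
from astropy.io import fits

# Placeholder inputs, for illustration only.
bad_pix = np.zeros((2048, 2048), dtype=np.uint32)
bad_pix[100, 200] = 1  # DO_NOT_USE

extra = fits.HDUList([fits.PrimaryHDU()])  # carries the "extra" keywords

save_final_map(bad_pix, 'nircam', 'NRCA1', extra,
               ['dark1.fits', 'flat1.fits'], 'me', 'Bad pixel mask',
               'GROUND', '2022-01-01T00:00:00', ['made for illustration'],
               'bad_pixel_mask.fits')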
Example #13
def save_final_map(bad_pix_map, instrument, detector, files, author, description, pedigree, useafter,
                   history_text, sigma_thresh, smooth_width, dead_sigma_thresh, max_dead_rate,
                   max_low_qe_rate, max_open_adj_rate, do_not_use_list, outfile):
    """Save a bad pixel map into a CRDS-formatted reference file

    Parameters
    ----------

    """
    # Define the non-standard fits header keywords by placing them in a
    # fits HDU List
    sig_thresh_keyword = 'BPMSIGMA'
    smooth_keyword = 'BPMSMOTH'
    dead_sigma_keyword = 'BPMDEDSG'
    max_dead_keyword = 'BPMMXDED'
    max_low_qe_keyword = 'BPMMXLQE'
    max_open_adj_keyword = 'BPMMXOAD'
    hdu = fits.PrimaryHDU()
    hdu.header[sig_thresh_keyword] = sigma_thresh
    hdu.header[smooth_keyword] = smooth_width
    hdu.header[dead_sigma_keyword] = dead_sigma_thresh
    hdu.header[max_dead_keyword] = max_dead_rate
    hdu.header[max_low_qe_keyword] = max_low_qe_rate
    hdu.header[max_open_adj_keyword] = max_open_adj_rate
    hdu_list = fits.HDUList([hdu])

    yd, xd = bad_pix_map.shape

    # Initialize the MaskModel using the hdu_list, so the new keywords will
    # be populated
    model = MaskModel(hdu_list)
    model.dq = bad_pix_map

    # Create dq_def data
    dq_def = create_dqdef()
    model.dq_def = dq_def
    model.meta.reftype = 'MASK'
    model.meta.subarray.name = 'FULL'
    model.meta.subarray.xstart = 1
    model.meta.subarray.xsize = xd
    model.meta.subarray.ystart = 1
    model.meta.subarray.ysize = yd
    model.meta.instrument.name = instrument.upper()
    model.meta.instrument.detector = detector

    # Get the fast and slow axis directions from one of the input files
    fastaxis, slowaxis = get_fastaxis(files[0])
    model.meta.subarray.fastaxis = fastaxis
    model.meta.subarray.slowaxis = slowaxis

    model.meta.author = author
    model.meta.description = description
    model.meta.pedigree = pedigree
    model.meta.useafter = useafter

    # Populate "extra" header keywords that will contain parameters used
    # in this module

    package_note = ('This file was created using the bad_pixel_mask.py module within the '
                    'jwst_reffiles package.')

    software_dict = {'name': 'jwst_reffiles.bad_pixel_mask.py', 'author': 'STScI',
                     'homepage': 'https://github.com/spacetelescope/jwst_reffiles',
                     'version': '0.0.0'}
    entry = util.create_history_entry(package_note, software=software_dict)
    model.history.append(entry)

    model.history.append(util.create_history_entry('Parameter values and descriptions:'))
    sigma_descrip = ('sigma_thresh: Number of standard deviations to use when sigma-clipping to '
                     'calculate the mean slope image or the mean across the detector. The value '
                     'used is stored in the {} keyword.'.format(sig_thresh_keyword))
    model.history.append(util.create_history_entry(sigma_descrip))

    smooth_descrip = ('smoothing_box_width: Width in pixels of the box kernel to use to compute the '
                      'smoothed mean image. The value used is stored in the {} keyword.'.format(smooth_keyword))
    model.history.append(util.create_history_entry(smooth_descrip))

    dead_sig_descrip = ('Number of standard deviations below the mean at which a pixel is considered dead. '
                        'The value used is stored in the {} keyword.'.format(dead_sigma_keyword))
    model.history.append(util.create_history_entry(dead_sig_descrip))

    max_dead_descrip = ('Maximum normalized signal rate of a pixel that is considered dead. The value '
                        'used is stored in the {} keyword.'.format(max_dead_keyword))
    model.history.append(util.create_history_entry(max_dead_descrip))

    max_low_qe_descrip = ('The maximum normalized signal a pixel can have and be considered low QE. The '
                          'value used is stored in the {} keyword.'.format(max_low_qe_keyword))
    model.history.append(util.create_history_entry(max_low_qe_descrip))

    max_open_adj_descrip = ('The maximum normalized signal a pixel adjacent to a low QE pixel can have '
                            'in order for the low QE pixel to be reclassified as OPEN. The value used '
                            'is stored in the {} keyword.'.format(max_open_adj_keyword))
    model.history.append(util.create_history_entry(max_open_adj_descrip))

    do_not_use_descrip = ('List of bad pixel types where the DO_NOT_USE flag is also applied. '
                          'Values used are: {}'.format(do_not_use_list))
    model.history.append(util.create_history_entry(do_not_use_descrip))

    # Add the list of input files used to create the map
    model.history.append(util.create_history_entry('DATA USED:'))
    for file in files:
        totlen = len(file)
        div = np.arange(0, totlen, 60)
        for val in div:
            if totlen > (val+60):
                model.history.append(util.create_history_entry(file[val:val+60]))
            else:
                model.history.append(util.create_history_entry(file[val:]))

    if history_text is not None:
        model.history.append(util.create_history_entry(history_text))

    model.save(outfile, overwrite=True)
    print('Final bad pixel mask reference file saved to: {}'.format(outfile))
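
A hedged round-trip check in the spirit of Examples #1 and #4, reopening the saved reference file (the path below is the hypothetical outfile from the sketch after Example #12):

import numpy as np

# Reopen the saved reference file and spot-check it.
with MaskModel('bad_pixel_mask.fits') as saved:
    assert saved.meta.reftype == 'MASK'
    assert saved.dq.dtype == np.uint32  # mask DQ is uint32, per Example #10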