Example #1
def test_create_history_entry():
    entry = util.create_history_entry("Once upon a time...")
    assert isinstance(entry, HistoryEntry)
    assert entry["description"] == "Once upon a time..."
    assert entry.get("software") is None
    assert (datetime.utcnow() - entry["time"]) < timedelta(seconds=10)

    software = {"name": "PolarBearSoft", "version": "1.2.3"}
    entry = util.create_history_entry("There was a tie-dyed polar bear...",
                                      software)
    assert isinstance(entry["software"], Software)
    assert entry["software"]["name"] == "PolarBearSoft"
    assert entry["software"]["version"] == "1.2.3"

    software = [
        {
            "name": "PolarBearSoft",
            "version": "1.2.3"
        },
        {
            "name": "BanjoSoft",
            "version": "4.5.6"
        },
    ]
    entry = util.create_history_entry("Who loved to strum the banjo.",
                                      software)
    assert isinstance(entry["software"], list)
    assert all(isinstance(s, Software) for s in entry["software"])
    assert entry["software"][0]["name"] == "PolarBearSoft"
    assert entry["software"][0]["version"] == "1.2.3"
    assert entry["software"][1]["name"] == "BanjoSoft"
    assert entry["software"][1]["version"] == "4.5.6"
Example #2
def create_wfc3_distortion(detector, outname, sci_pupil,
                           sci_subarr, sci_exptype, history_entry, filter):
    """
    Create an asdf reference file with all distortion components for the NIRCam imager.
    NOTE: The IDT has not provided any distortion information. The files are constructed
    using ISIM transformations provided/(computed?) by the TEL team which they use to
    create the SIAF file.
    These reference files should be replaced when/if the IDT provides us with distortion.
    Parameters
    ----------
    detector : str
        NRCB1, NRCB2, NRCB3, NRCB4, NRCB5, NRCA1, NRCA2, NRCA3, NRCA4, NRCA5
    aperture : str
        Name of the aperture/subarray. (e.g. FULL, SUB160, SUB320, SUB640, GRISM_F322W2)
    outname : str
        Name of output file.
    Examples
    --------
    """
    # Download WFC3 Image Distortion File
    from astropy.utils.data import download_file
    fn = download_file('https://hst-crds.stsci.edu/unchecked_get/references/hst/w3m18525i_idc.fits', cache=True)
    wfc3_distortion_file = fits.open(fn)
    wfc3_filter_info = wfc3_distortion_file[1].data[list(wfc3_distortion_file[1].data['FILTER']).index(filter)]
    
    
    degree = 4  # WFC3 Distortion is fourth degree
    
    # From Bryan Hilbert:
    #   The parity term is just an indicator of the relationship between the detector y axis and the “science” y axis.
    #   A parity of -1 means that the y axes of the two systems run in opposite directions... A value of 1 indicates no flip.
    # From Colin Cox:
    #   ... for WFC3 it is always -1 so maybe people gave up mentioning it.
    parity = -1
    
    #full_aperture = detector + '_' + aperture

    # Get Siaf instance for detector/aperture
    #inst_siaf = pysiaf.Siaf('nircam')
    #siaf = inst_siaf[full_aperture]

    # *****************************************************
    # "Forward' transformations. science --> ideal --> V2V3
    xcoeffs, ycoeffs = get_distortion_coeffs(degree, wfc3_filter_info)

    sci2idlx = Polynomial2D(degree, **xcoeffs)
    sci2idly = Polynomial2D(degree, **ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    idl2v2v3x, idl2v2v3y = v2v3_model('ideal', 'v2v3', parity, np.radians(wfc3_distortion_file[1].data[wfc3_distortion_file[1].data['FILTER'] == filter]['THETA'][0]))

    '''
    # *****************************************************
    # 'Reverse' transformations. V2V3 --> ideal --> science
    xcoeffs, ycoeffs = get_distortion_coeffs('Idl2Sci', siaf)

    idl2scix = Polynomial2D(degree, **xcoeffs)
    idl2sciy = Polynomial2D(degree, **ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    v2v32idlx, v2v32idly = v2v3_model('v2v3', 'ideal', parity, np.radians(wfc3_distortion_file['THETA']))
    '''

    # Now create a compound model for each with the appropriate inverse
    # Inverse polynomials were removed in favor of using GWCS' numerical inverse capabilities
    sci2idl = Mapping([0, 1, 0, 1]) | sci2idlx & sci2idly
    #sci2idl.inverse = Mapping([0, 1, 0, 1]) | idl2scix & idl2sciy

    idl2v2v3 = Mapping([0, 1, 0, 1]) | idl2v2v3x & idl2v2v3y
    #idl2v2v3.inverse = Mapping([0, 1, 0, 1]) | v2v32idlx & v2v32idly

    # Now string the models together to make a single transformation

    # We also need
    # to account for the difference of 1 between the SIAF
    # coordinate values (indexed to 1) and python (indexed to 0).
    # Nadia said that this shift should be present in the
    # distortion reference file.

    core_model = sci2idl  # | idl2v2v3

    # Now add in the shifts to create the full model
    # including the shift to go from 0-indexed python coords to
    # 1-indexed

    # Find the distance between (0,0) and the reference location
    xshift = Shift(wfc3_filter_info['XREF'])
    yshift = Shift(wfc3_filter_info['YREF'])
    
    # Finally, we need to shift by the v2,v3 value of the reference
    # location in order to get to absolute v2,v3 coordinates
    v2shift = Shift(wfc3_filter_info['V2REF'])
    v3shift = Shift(wfc3_filter_info['V3REF'])
    
    # SIAF coords
    index_shift = Shift(1)
    model = index_shift & index_shift | xshift & yshift | core_model | v2shift & v3shift

    # Since the inverse of all model components are now defined,
    # the total model inverse is also defined automatically

    # Save using the DistortionModel datamodel
    d = DistortionModel(model=model, input_units=u.pix,
                        output_units=u.arcsec)

    #Populate metadata

    # Keyword values in science data to which this file should
    # be applied
    p_pupil = ''
    for p in sci_pupil:
        p_pupil = p_pupil + p + '|'

    p_subarr = ''
    for p in sci_subarr:
        p_subarr = p_subarr + p + '|'

    p_exptype = ''
    for p in sci_exptype:
        p_exptype = p_exptype + p + '|'

    d.meta.instrument.p_pupil = p_pupil
    d.meta.subarray.p_subarray = p_subarr
    d.meta.exposure.p_exptype = p_exptype

    # metadata describing the reference file itself
    d.meta.title = "WFC3 Distortion"
    d.meta.instrument.name = "WFC3"
    d.meta.instrument.module = detector[-2]
    
    numdet = detector[-1]
    d.meta.instrument.channel = "LONG" if numdet == '5' else "SHORT"
    # In the reference file headers, we need to switch NRCA5 to
    # NRCALONG, and same for module B.
    d.meta.instrument.detector = (detector[0:4] + 'LONG') if numdet == '5' else detector
    
    d.meta.telescope = 'HST'
    d.meta.subarray.name = 'FULL'
    d.meta.pedigree = 'GROUND'
    d.meta.reftype = 'DISTORTION'
    d.meta.author = 'D. Nguyen'
    d.meta.litref = "https://github.com/spacetelescope/jwreftools"
    d.meta.description = "Distortion model from SIAF coefficients in pysiaf version 0.6.1"
    #d.meta.exp_type = exp_type
    d.meta.useafter = "2014-10-01T00:00:00"

    # To be ready for the future where we will have filter-dependent solutions
    d.meta.instrument.filter = 'N/A'

    # Create initial HISTORY ENTRY
    sdict = {'name': 'nircam_distortion_reffiles_from_pysiaf.py',
             'author': 'B.Hilbert',
             'homepage': 'https://github.com/spacetelescope/jwreftools',
             'version': '0.8'}

    entry = util.create_history_entry(history_entry, software=sdict)
    d.history = [entry]

    #Create additional HISTORY entries
    #entry2 = util.create_history_entry(history_2)
    #d.history.append(entry2)

    d.save(outname)
    print("Output saved to {}".format(outname))
Example #3
def save_skyflat(skyflat,
                 skyflat_error,
                 skyflat_dq,
                 instrument='',
                 detector='',
                 fltr='',
                 pupil='',
                 subarray='GENERIC',
                 author='STScI',
                 description='Pixel flat calibration file',
                 pedigree='GROUND',
                 useafter='2021-01-01T00:00:00',
                 history='',
                 fastaxis=-1,
                 slowaxis=2,
                 substrt1=1,
                 substrt2=1,
                 filenames=[],
                 outfile='skyflat_reffile.fits'):
    """
    Saves skyflat data in a CRDS-formatted file that can be used
    as a reference file in the flat_field step of the image2 JWST pipeline.

    Parameters
    ----------
    skyflat : numpy.ndarray
        The 2D skyflat image.

    skyflat_error : numpy.ndarray
        The 2D skyflat error image.

    skyflat_dq : numpy.ndarray
        The 2D skyflat data quality image.

    instrument : str
        CRDS-required instrument for which to use this reference file.

    detector : str
        CRDS-required detector for which to use this reference file.

    fltr : str
        CRDS-required filter for which to use this reference file.

    pupil : str
        CRDS-required pupil for which to use this reference file.

    subarray : str
        CRDS-required subarray for which to use this reference file.

    author : str
        CRDS-required name of the reference file author, to be placed in the
        reference file header.

    description : str
        CRDS-required description of the reference file, to be placed in the
        reference file header.

    pedigree : str
        CRDS-required pedigree of the data used to create the reference file.

    useafter : str
        CRDS-required date of the earliest data with which this reference file
        should be used (e.g. '2019-04-01T00:00:00').

    history : str
        CRDS-required history section to place in the reference file header.

    fastaxis : int
        CRDS-required fastaxis of the reference file.

    slowaxis : int
        CRDS-required slowaxis of the reference file.

    substrt1 : int
        CRDS-required starting pixel in axis 1 direction.

    substrt2 : int
        CRDS-required starting pixel in axis 2 direction.

    filenames : list
        A list of files that went into the skyflat creation.

    outfile : str
        The filename of the output skyflat reference file. Defaults to
        'skyflat_reffile.fits'.

    Outputs
    -------
    {outfile}.fits
        The CRDS-formatted skyflat reference file.
    """

    m = FlatModel()

    # Populate the data
    m.data = skyflat
    m.err = skyflat_error
    m.dq = skyflat_dq
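    # Each dq_def row follows the DQ_DEF table layout: (bit, value, name, description)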
    m.dq_def = [(0, 0, 'GOOD', ''), (0, 1, 'DO_NOT_USE', ''),
                (1, 2, 'UNRELIABLE_FLAT', ''), (2, 4, 'NO_FLAT_FIELD', '')]

    # Add CRDS-required keywords
    m.meta.instrument.name = instrument
    m.meta.instrument.detector = detector
    m.meta.instrument.filter = fltr
    m.meta.instrument.pupil = pupil
    m.meta.subarray.name = subarray
    m.meta.author = author
    m.meta.description = description
    m.meta.pedigree = pedigree
    m.meta.useafter = useafter
    m.meta.subarray.fastaxis = fastaxis
    m.meta.subarray.slowaxis = slowaxis
    m.meta.reftype = 'FLAT'
    yd, xd = skyflat.shape
    m.meta.subarray.xstart = substrt1
    m.meta.subarray.xsize = xd
    m.meta.subarray.ystart = substrt2
    m.meta.subarray.ysize = yd

    # Add the list of input files used to create the skyflat reference file
    m.history.append('DATA USED:')
    for f in filenames:
        f = os.path.basename(f)
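        # Write the name in 60-character chunks so each piece fits on a single
        # FITS HISTORY card (cards are limited to 80 characters).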
        totlen = len(f)
        div = np.arange(0, totlen, 60)
        for val in div:
            if totlen > (val + 60):
                m.history.append(util.create_history_entry(f[val:val + 60]))
            else:
                m.history.append(util.create_history_entry(f[val:]))

    # Add any more history to the header
    if history != '':
        m.history.append(util.create_history_entry(history))

    # Save the flat reference file
    m.save(outfile, overwrite=True)
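A hypothetical call with synthetic arrays, to show how the keywords map onto the CRDS metadata; the array shapes, instrument/filter values, and file names below are placeholders, not taken from a real flat:

# Hypothetical usage; array contents and metadata values are placeholders.
import numpy as np

flat = np.ones((2048, 2048), dtype=np.float32)
flat_err = np.full_like(flat, 0.01)
flat_dq = np.zeros(flat.shape, dtype=np.uint32)

save_skyflat(flat, flat_err, flat_dq,
             instrument='NIRCAM', detector='NRCA1',
             fltr='F200W', pupil='CLEAR',
             history='Sky flat built from dithered sky images.',
             filenames=['image_0001_cal.fits', 'image_0002_cal.fits'],
             outfile='nrca1_f200w_clear_skyflat.fits')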
Example #4
def save_final_map(bad_pix_map, instrument, detector, hdulist, files, author,
                   description, pedigree, useafter, history_text, outfile):
    """Save a bad pixel map into a CRDS-formatted reference file

    Parameters
    ----------
    bad_pix_map : numpy.ndarray
        2D bad pixel array

    instrument : str
        Name of instrument associated with the bad pixel array

    detector : str
        Name of detector associated with the bad pixel array

    hdulist : astropy.io.fits.HDUList
        HDUList containing "extra" fits keywords

    files : list
        List of files used to create ``bad_pix_map``

    author : str
        Author of the bad pixel mask reference file

    description : str
        CRDS description to use in the final bad pixel file

    pedigree : str
        CRDS pedigree to use in the final bad pixel file

    useafter : str
        CRDS useafter string for the bad pixel file

    history_text : list
        List of strings to add as HISTORY entries to the bad pixel file

    outfile : str
        Name of the output bad pixel file
    """
    yd, xd = bad_pix_map.shape

    # Initialize the MaskModel using the hdu_list, so the new keywords will
    # be populated
    model = MaskModel(hdulist)
    model.dq = bad_pix_map

    # Create dq_def data
    dq_def = badpix_from_flats.create_dqdef()
    model.dq_def = dq_def
    model.meta.reftype = 'MASK'
    model.meta.subarray.name = 'FULL'
    model.meta.subarray.xstart = 1
    model.meta.subarray.xsize = xd
    model.meta.subarray.ystart = 1
    model.meta.subarray.ysize = yd
    model.meta.instrument.name = instrument.upper()
    model.meta.instrument.detector = detector

    # Get the fast and slow axis directions from one of the input files
    fastaxis, slowaxis = badpix_from_flats.get_fastaxis(files[0])
    model.meta.subarray.fastaxis = fastaxis
    model.meta.subarray.slowaxis = slowaxis

    model.meta.author = author
    model.meta.description = description
    model.meta.pedigree = pedigree
    model.meta.useafter = useafter

    # Add information about parameters used
    # Parameters from badpix_from_flats
    package_note = (
        'This file was created using the bad_pixel_mask.py module within the '
        'jwst_reffiles package.')

    software_dict = {
        'name': 'jwst_reffiles.bad_pixel_mask.bad_pixel_mask.py',
        'author': 'STScI',
        'homepage': 'https://github.com/spacetelescope/jwst_reffiles',
        'version': '0.0.0'
    }
    entry = util.create_history_entry(package_note, software=software_dict)
    model.history.append(entry)

    model.history.append(
        util.create_history_entry('Parameter values and descriptions:'))
    dead_search_descrip = (
        'dead_search: Boolean, whether or not to run the dead pixel search '
        'using flat field files. The value is stored in the {} keyword.'.
        format(dead_search_kw))
    model.history.append(util.create_history_entry(dead_search_descrip))

    low_qe_search_descrip = (
        'low_qe_and_open_search: Boolean, whether or not to run the low QE '
        'and open pixel search using flat field files. The value is stored in the {} '
        'keyword.'.format(low_qe_search_kw))
    model.history.append(util.create_history_entry(low_qe_search_descrip))

    dead_type_descrip = (
        'dead_search_type: Method used to identify dead pixels. The value is stored in the '
        '{} keyword.'.format(dead_search_type_kw))
    model.history.append(util.create_history_entry(dead_type_descrip))

    sigma_descrip = (
        'flat_mean_sigma_threshold: Number of standard deviations to use when sigma-clipping to '
        'calculate the mean slope image or the mean across the detector. The value '
        'used is stored in the {} keyword.'.format(mean_sig_threshold_kw))
    model.history.append(util.create_history_entry(sigma_descrip))

    norm_descrip = (
        'flat_mean_normalization_method: Specify how the mean image is normalized prior to searching '
        'for bad pixels. The value used is stored in the {} keyword.'.format(
            norm_method_kw))
    model.history.append(util.create_history_entry(norm_descrip))

    smooth_descrip = (
        'smoothing_box_width: Width in pixels of the box kernel to use to compute the '
        'smoothed mean image. The value used is stored in the {} keyword.'.
        format(smooth_box_width_kw))
    model.history.append(util.create_history_entry(smooth_descrip))

    smooth_type_descrip = (
        'smoothing_type: Type of smoothing to do: Box2D or median filtering. The value used '
        'is stored in the {} keyword.'.format(smoothing_type_kw))
    model.history.append(util.create_history_entry(smooth_type_descrip))

    dead_sig_descrip = (
        'Number of standard deviations below the mean at which a pixel is considered dead. '
        'The value used is stored in the {} keyword.'.format(
            dead_sig_thresh_kw))
    model.history.append(util.create_history_entry(dead_sig_descrip))

    max_dead_descrip = (
        'Maximum normalized signal rate of a pixel that is considered dead. The value '
        'used is stored in the {} keyword.'.format(max_dead_sig_kw))
    model.history.append(util.create_history_entry(max_dead_descrip))

    run_dead_flux_descrip = (
        'run_dead_flux_check: Boolean, if True, search for pixels erroneously flagged '
        'as dead because they are saturated in all groups. The value used is stored '
        'in the {} keyword.'.format(dead_flux_check_kw))
    model.history.append(util.create_history_entry(run_dead_flux_descrip))

    dead_flux_limit_descrip = (
        'Signal limit in raw data above which the pixel is considered not dead. The '
        'value used is stored in the {} keyword.'.format(max_dead_sig_kw))
    model.history.append(util.create_history_entry(dead_flux_limit_descrip))

    max_low_qe_descrip = (
        'The maximum normalized signal a pixel can have and be considered low QE. The '
        'value used is stored in the {} keyword.'.format(max_low_qe_kw))
    model.history.append(util.create_history_entry(max_low_qe_descrip))

    max_open_adj_descrip = (
        'The maximum normalized signal a pixel adjacent to a low QE pixel can have '
        'in order for the low QE pixel to be reclassified as OPEN. The value used '
        'is stored in the {} keyword.'.format(max_open_adj_kw))
    model.history.append(util.create_history_entry(max_open_adj_descrip))

    flat_do_not_use_descrip = (
        'List of bad pixel types (from flats) where the DO_NOT_USE flag is also applied. '
        'The values used are stored in the {} keyword.'.format(
            flat_do_not_use_kw))
    model.history.append(util.create_history_entry(flat_do_not_use_descrip))

    manual_file_descrip = (
        'Name of the ascii file containing a list of pixels to be added manually. The '
        'value used is stored in the {} keyword.'.format(manual_flag_kw))
    model.history.append(util.create_history_entry(manual_file_descrip))

    # Parameters from badpix_from_darks
    bad_from_dark_descrip = (
        'badpix_from_dark: Boolean, whether or not the bad pixel from dark search  '
        'has been run. The value is stored in the {} keyword.'.format(
            bad_from_dark_kw))
    model.history.append(util.create_history_entry(bad_from_dark_descrip))

    dark_clip_sig_descrip = (
        'Number of sigma to use when sigma-clipping 2D stdev image. The value used '
        'is stored in the {} keyword.'.format(dark_clip_sigma_kw))
    model.history.append(util.create_history_entry(dark_clip_sig_descrip))

    dark_clip_iter_descrip = (
        'Max number of iterations to use when sigma clipping mean and stdev values. '
        'The value used is stored in the {} keyword.'.format(
            dark_clip_iters_kw))
    model.history.append(util.create_history_entry(dark_clip_iter_descrip))

    dark_noisy_thresh_descrip = (
        'Number of sigma above mean noise for noisy pix threshold. The value '
        'used is stored in the {} keyword.'.format(dark_noisy_thresh_kw))
    model.history.append(util.create_history_entry(dark_noisy_thresh_descrip))

    max_sat_frac_descrip = (
        'Fraction of integrations within which a pixel must be fully saturated before '
        'flagging it as HOT. The value used is stored in the {} keyword.'.
        format(max_sat_frac_kw))
    model.history.append(util.create_history_entry(max_sat_frac_descrip))

    jump_limit_descrip = (
        'Maximum number of jumps a pixel can have in an integration before it is flagged as a '
        '"high jump" pixel. The value used is stored in the {} keyword.'.
        format(jump_limit_kw))
    model.history.append(util.create_history_entry(jump_limit_descrip))

    jump_ratio_descrip = (
        'Cutoff for the ratio of jumps early in the ramp to jumps later in the ramp when '
        'looking for RC pixels. The value used is stored in the {} keyword.'.
        format(jump_ratio_thresh_kw))
    model.history.append(util.create_history_entry(jump_ratio_descrip))

    cutoff_frac_descrip = (
        'Fraction of the integration to use when comparing the jump rate early in the integration to '
        'that across the entire integration. The value used is stored in the {} keyword.'
        .format(cutoff_frac_kw))
    model.history.append(util.create_history_entry(cutoff_frac_descrip))

    ped_sigma_descrip = (
        'Pixels with pedestal values more than this limit above the mean are flagged as RC. '
        'The value used is stored in the {} keyword.'.format(
            pedestal_sig_thresh_kw))
    model.history.append(util.create_history_entry(ped_sigma_descrip))

    rc_thresh_descrip = (
        'Fraction of input files within which a pixel must be identified as an RC pixel before '
        'it will be flagged as a permanent RC pixel. The value used is stored in the {} '
        'keyword.'.format(rc_frac_thresh_kw))
    model.history.append(util.create_history_entry(rc_thresh_descrip))

    low_ped_descrip = (
        'Fraction of input files within which a pixel must be identified as a low pedestal '
        'pixel before it will be flagged as a permanent low pedestal pixel. The value used '
        'is stored in the {} keyword.'.format(low_ped_frac_kw))
    model.history.append(util.create_history_entry(low_ped_descrip))

    high_cr_descrip = (
        'Fraction of input files within which a pixel must be flagged as having a high number '
        'of jumps before it will be flagged as permanently noisy. The value used '
        'is stored in the {} keyword.'.format(high_cr_frac_kw))
    model.history.append(util.create_history_entry(high_cr_descrip))

    dark_do_not_use_descrip = (
        'List of bad pixel types (from darks) where the DO_NOT_USE flag is also applied. '
        'The values used are stored in the {} keyword.'.format(
            dark_do_not_use_kw))
    model.history.append(util.create_history_entry(dark_do_not_use_descrip))

    # Add the list of input files used to create the map
    model.history.append('DATA USED:')
    for file in files:
        totlen = len(file)
        div = np.arange(0, totlen, 60)
        for val in div:
            if totlen > (val + 60):
                model.history.append(
                    util.create_history_entry(file[val:val + 60]))
            else:
                model.history.append(util.create_history_entry(file[val:]))

    # Add the do not use lists, pixel flag mappings, and user-provided
    # history text
    for history_entry in history_text:
        if history_entry != '':
            model.history.append(util.create_history_entry(history_entry))

    model.save(outfile, overwrite=True)
    print('Final bad pixel mask reference file saved to: {}'.format(outfile))
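A hypothetical invocation of save_final_map; the DQ array, HDUList, and input file names are placeholders, and the sketch assumes the module-level keyword constants and the badpix_from_flats helpers imported by this module are available:

# Hypothetical usage; the mask array, HDUList, and file names are placeholders.
import numpy as np
from astropy.io import fits

bad_pix = np.zeros((2048, 2048), dtype=np.uint32)
extra_kw_hdulist = fits.HDUList([fits.PrimaryHDU()])

save_final_map(bad_pix, 'nircam', 'NRCA1', extra_kw_hdulist,
               files=['dark_0001_uncal.fits', 'flat_0001_rate.fits'],
               author='jwst_reffiles', description='NRCA1 bad pixel mask',
               pedigree='GROUND', useafter='2021-01-01T00:00:00',
               history_text=['Created from ground test darks and flats.'],
               outfile='nrca1_bad_pixel_mask.fits')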
Example #5
def create_nircam_distortion(detector,
                             aperture,
                             outname,
                             sci_pupil,
                             sci_subarr,
                             sci_exptype,
                             history_entry,
                             author=None,
                             descrip=None,
                             pedigree=None,
                             useafter=None,
                             dist_coeffs_file=None,
                             siaf_xml_file=None):
    """
    Create an asdf reference file with all distortion components for the NIRCam imager.

    NOTE: The IDT has not provided any distortion information. The files are constructed
    using ISIM transformations provided/(computed?) by the TEL team which they use to
    create the SIAF file.
    These reference files should be replaced when/if the IDT provides us with distortion.

    Parameters
    ----------
    detector : str
        NRCB1, NRCB2, NRCB3, NRCB4, NRCB5, NRCA1, NRCA2, NRCA3, NRCA4, NRCA5

    aperture : str
        Name of the aperture/subarray. (e.g. FULL, SUB160, SUB320, SUB640, GRISM_F322W2)

    outname : str
        Name of output file.

    sci_pupil : list
        Pupil wheel values for which this distortion solution applies

    sci_subarr : list
        List of subarray/aperture names to which this distortion solution applies

    sci_exptype : list
        List of exposure types to which this distortion solution applies

    history_entry : str
        Text to be added as a HISTORY entry in the output reference file

    author : str
        Value to place in the output file's Author metadata entry

    descrip : str
        Text to place in the output file's DESCRIP header keyword

    pedigree : str
        Value to place in the output file's PEDIGREE header keyword

    useafter : str
        Value to place in the output file's USEAFTER header keyword (e.g. "2014-10-01T00:00:01")

    dist_coeffs_file : str
        Name of ascii file (nominally output by the jwst_fpa package) containing distortion
        coefficients. If this is provided, the coefficients in this file are used, rather
        than those in pysiaf.

    siaf_xml_file : str
        Name of SIAF xml file to use in place of the default SIAF version from pysiaf.
        If None, the default version in pysiaf will be used.

    Examples
    --------

    """
    degree = 5  # distortion in pysiaf is a 5th order polynomial
    numdet = detector[-1]
    module = detector[-2]
    channel = 'SHORT'
    if numdet == '5':
        channel = 'LONG'

    full_aperture = detector + '_' + aperture

    # Get Siaf instance for detector/aperture
    if siaf_xml_file is None:
        print('Using default SIAF version in pysiaf.')
        inst_siaf = pysiaf.Siaf('nircam')
    else:
        print(f'SIAF to be loaded from {siaf_xml_file}...')
        inst_siaf = pysiaf.Siaf(filename=siaf_xml_file, instrument='nircam')

    siaf = inst_siaf[full_aperture]

    # Find the distance between (0,0) and the reference location
    xshift, yshift = get_refpix(inst_siaf, full_aperture)

    # *****************************************************
    # If the user provides files containing distortion coefficients
    # (as output by the jwst_fpa package), use those rather than
    # retrieving coefficients from siaf.
    if dist_coeffs_file is not None:
        coeff_tab = read_distortion_coeffs_file(dist_coeffs_file)
        xcoeffs = convert_distortion_coeffs_table(coeff_tab, 'Sci2IdlX')
        ycoeffs = convert_distortion_coeffs_table(coeff_tab, 'Sci2IdlY')
        inv_xcoeffs = convert_distortion_coeffs_table(coeff_tab, 'Idl2SciX')
        inv_ycoeffs = convert_distortion_coeffs_table(coeff_tab, 'Idl2SciY')
    else:
        xcoeffs, ycoeffs = get_distortion_coeffs('Sci2Idl', siaf)
        inv_xcoeffs, inv_ycoeffs = get_distortion_coeffs('Idl2Sci', siaf)

    # V3IdlYAngle and V2Ref, V3Ref should always be taken from the latest version
    # of SIAF, rather than the output of jwst_fpa. Separate FGS/NIRISS analyses must
    # be done in order to modify these values.
    v3_ideal_y_angle = siaf.V3IdlYAngle * np.pi / 180.

    # *****************************************************
    # "Forward' transformations. science --> ideal --> V2V3
    #label = 'Sci2Idl'
    ##from_units = 'distorted pixels'
    ##to_units = 'arcsec'

    #xcoeffs, ycoeffs = get_distortion_coeffs(label, siaf)

    sci2idlx = Polynomial2D(degree, **xcoeffs)
    sci2idly = Polynomial2D(degree, **ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    parity = siaf.VIdlParity
    #v3_ideal_y_angle = siaf.V3IdlYAngle * np.pi / 180.
    idl2v2v3x, idl2v2v3y = v2v3_model('ideal', 'v2v3', parity,
                                      v3_ideal_y_angle)

    # Finally, we need to shift by the v2,v3 value of the reference
    # location in order to get to absolute v2,v3 coordinates
    v2shift, v3shift = get_v2v3ref(siaf)

    # *****************************************************
    # 'Reverse' transformations. V2V3 --> ideal --> science
    #label = 'Idl2Sci'
    ##from_units = 'arcsec'
    ##to_units = 'distorted pixels'

    #xcoeffs, ycoeffs = get_distortion_coeffs(label, siaf)

    idl2scix = Polynomial2D(degree, **inv_xcoeffs)
    idl2sciy = Polynomial2D(degree, **inv_ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    #parity = siaf.VIdlParity
    #v3_ideal_y_angle = siaf.V3IdlYAngle * np.pi / 180.
    v2v32idlx, v2v32idly = v2v3_model('v2v3', 'ideal', parity,
                                      v3_ideal_y_angle)

    ##"Forward' transformations. science --> ideal --> V2V3
    #sci2idlx, sci2idly, sciunit, idlunit = read_siaf_table.get_siaf_transform(coefffile,full_aperture,'science','ideal', 5)
    #idl2v2v3x, idl2v2v3y = read_siaf_table.get_siaf_v2v3_transform(coefffile,full_aperture,from_system='ideal')

    ##'Reverse' transformations. V2V3 --> ideal --> science
    #v2v32idlx, v2v32idly = read_siaf_table.get_siaf_v2v3_transform(coefffile,full_aperture,to_system='ideal')
    #idl2scix, idl2sciy, idlunit, sciunit = read_siaf_table.get_siaf_transform(coefffile,full_aperture,'ideal','science', 5)

    # Now create a compound model for each with the appropriate inverse
    sci2idl = Mapping([0, 1, 0, 1]) | sci2idlx & sci2idly
    sci2idl.inverse = Mapping([0, 1, 0, 1]) | idl2scix & idl2sciy

    idl2v2v3 = Mapping([0, 1, 0, 1]) | idl2v2v3x & idl2v2v3y
    idl2v2v3.inverse = Mapping([0, 1, 0, 1]) | v2v32idlx & v2v32idly

    # Now string the models together to make a single transformation

    # We also need
    # to account for the difference of 1 between the SIAF
    # coordinate values (indexed to 1) and python (indexed to 0).
    # Nadia said that this shift should be present in the
    # distortion reference file.

    core_model = sci2idl | idl2v2v3

    # Now add in the shifts to create the full model
    # including the shift to go from 0-indexed python coords to
    # 1-indexed

    # SIAF coords
    index_shift = Shift(1)
    model = index_shift & index_shift | xshift & yshift | core_model | v2shift & v3shift

    # Since the inverse of all model components are now defined,
    # the total model inverse is also defined automatically

    # In the reference file headers, we need to switch NRCA5 to
    # NRCALONG, and same for module B.
    if detector[-1] == '5':
        detector = detector[0:4] + 'LONG'

    # Save using the DistortionModel datamodel
    d = DistortionModel(model=model, input_units=u.pix, output_units=u.arcsec)

    #Populate metadata

    # Keyword values in science data to which this file should
    # be applied
    p_pupil = ''
    for p in sci_pupil:
        p_pupil = p_pupil + p + '|'

    p_subarr = ''
    for p in sci_subarr:
        p_subarr = p_subarr + p + '|'

    p_exptype = ''
    for p in sci_exptype:
        p_exptype = p_exptype + p + '|'

    d.meta.instrument.p_pupil = p_pupil
    d.meta.subarray.p_subarray = p_subarr
    d.meta.exposure.p_exptype = p_exptype

    #d.meta.instrument.p_pupil = "CLEAR|F162M|F164N|F323N|F405N|F470N|"
    #d.meta.p_subarray = "FULL|SUB64P|SUB160|SUB160P|SUB320|SUB400P|SUB640|SUB32TATS|SUB32TATSGRISM|SUB8FP1A|SUB8FP1B|SUB96DHSPILA|SUB96DHSPILB|SUB64FP1A|SUB64FP1B|"
    #d.meta.exposure.p_exptype = "NRC_IMAGE|NRC_TSIMAGE|NRC_FLAT|NRC_LED|NRC_WFSC|"

    # metadata describing the reference file itself
    d.meta.title = "NIRCam Distortion"
    d.meta.instrument.name = "NIRCAM"
    d.meta.instrument.module = module
    d.meta.instrument.channel = channel
    d.meta.instrument.detector = detector
    d.meta.telescope = 'JWST'
    d.meta.subarray.name = 'FULL'

    if pedigree is None:
        d.meta.pedigree = 'GROUND'
    else:
        if pedigree.upper() not in ['DUMMY', 'GROUND', 'FLIGHT']:
            raise ValueError("Bad PEDIGREE value.")
        d.meta.pedigree = pedigree.upper()

    d.meta.reftype = 'DISTORTION'

    if author is None:
        author = "B. Hilbert"
    d.meta.author = author

    d.meta.litref = "https://github.com/spacetelescope/nircam_calib/nircam_calib/reffile_creation/pipeline/distortion/nircam_distortion_reffiles_from_pysiaf.py"

    if descrip is None:
        d.meta.description = "TEST OF UPDATED CODE"
    else:
        d.meta.description = descrip

    #d.meta.exp_type = exp_type
    if useafter is None:
        d.meta.useafter = "2014-10-01T00:00:01"
    else:
        d.meta.useafter = useafter

    # To be ready for the future where we will have filter-dependent solutions
    d.meta.instrument.filter = 'N/A'

    # Create initial HISTORY ENTRY
    sdict = {
        'name': 'nircam_distortion_reffiles_from_pysiaf.py',
        'author': author,
        'homepage': 'https://github.com/spacetelescope/nircam_calib',
        'version': '0.0'
    }

    entry = util.create_history_entry(history_entry, software=sdict)
    d.history = [entry]

    #Create additional HISTORY entries
    #entry2 = util.create_history_entry(history_2)
    #d.history.append(entry2)

    d.save(outname)
    print("Output saved to {}".format(outname))
Example #6
def save_readnoise(readnoise,
                   instrument='',
                   detector='',
                   subarray='GENERIC',
                   readpatt='ANY',
                   outfile='readnoise_jwst_reffiles.fits',
                   author='jwst_reffiles',
                   description='CDS Noise Image',
                   pedigree='GROUND',
                   useafter='2015-10-01T00:00:00',
                   history='',
                   fastaxis=-1,
                   slowaxis=2,
                   substrt1=1,
                   substrt2=1,
                   filenames=[]):
    """Saves a CRDS-formatted readnoise reference file.

    Parameters
    ----------
    readnoise : numpy.ndarray
        The 2D readnoise image.

    instrument : str
        CRDS-required instrument for which to use this reference file.

    detector : str
        CRDS-required detector for which to use this reference file.

    subarray : str
        CRDS-required subarray for which to use this reference file.

    readpatt : str
        CRDS-required read pattern for which to use this reference file.

    outfile : str
        Name of the CRDS-formatted readnoise reference file to save the final
        readnoise map to.

    author : str
        CRDS-required name of the reference file author, to be placed in the
        reference file header.

    description : str
        CRDS-required description of the reference file, to be placed in the
        reference file header.

    pedigree : str
        CRDS-required pedigree of the data used to create the reference file.

    useafter : str
        CRDS-required date of the earliest data with which this reference file
        should be used (e.g. '2019-04-01T00:00:00').

    history : str
        CRDS-required history section to place in the reference file header.

    fastaxis : int
        CRDS-required fastaxis of the reference file.

    slowaxis : int
        CRDS-required slowaxis of the reference file.

    substrt1 : int
        CRDS-required starting pixel in axis 1 direction.

    substrt2 : int
        CRDS-required starting pixel in axis 2 direction.

    filenames : list
        List of dark current files that were used to generate the reference
        file.
    """

    r = ReadnoiseModel()

    r.data = readnoise
    r.meta.bunit_data = 'DN'
    r.meta.instrument.name = instrument
    r.meta.instrument.detector = detector
    r.meta.subarray.name = subarray
    r.meta.exposure.readpatt = readpatt
    r.meta.author = author
    r.meta.description = description
    r.meta.pedigree = pedigree
    r.meta.useafter = useafter
    r.meta.subarray.fastaxis = fastaxis
    r.meta.subarray.slowaxis = slowaxis
    r.meta.reftype = 'READNOISE'

    yd, xd = readnoise.shape
    r.meta.subarray.xstart = substrt1
    r.meta.subarray.xsize = xd
    r.meta.subarray.ystart = substrt2
    r.meta.subarray.ysize = yd

    package_note = ('This file was created using the readnoise.py module '
                    'within the jwst_reffiles package.')
    software_dict = {
        'name': 'jwst_reffiles.readnoise.py',
        'author': 'STScI',
        'homepage': 'https://github.com/spacetelescope/jwst_reffiles',
        'version': '0.0.0'
    }
    entry = util.create_history_entry(package_note, software=software_dict)
    r.history.append(entry)

    # Add the list of input files used to create the readnoise reference file
    r.history.append('DATA USED:')
    for f in filenames:
        f = os.path.basename(f)
        totlen = len(f)
        div = np.arange(0, totlen, 60)
        for val in div:
            if totlen > (val + 60):
                r.history.append(util.create_history_entry(f[val:val + 60]))
            else:
                r.history.append(util.create_history_entry(f[val:]))

    if history != '':
        r.history.append(util.create_history_entry(history))

    r.save(outfile, overwrite=True)
    print('Final CRDS-formatted readnoise map saved to {}'.format(outfile))
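A hypothetical call with a synthetic readnoise image; the noise level, detector, and file names are placeholders:

# Hypothetical usage; array contents and metadata values are placeholders.
import numpy as np

rn = np.full((2048, 2048), 15.0, dtype=np.float32)   # CDS noise image in DN
save_readnoise(rn,
               instrument='NIRCAM', detector='NRCA1',
               subarray='GENERIC', readpatt='ANY',
               history='Readnoise measured from CDS pairs of dark ramps.',
               filenames=['dark_0001_uncal.fits', 'dark_0002_uncal.fits'],
               outfile='nrca1_readnoise.fits')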
Example #7
def save_superbias(superbias,
                   error,
                   dq,
                   instrument='',
                   detector='',
                   subarray='GENERIC',
                   readpatt='ANY',
                   outfile='superbias_jwst_reffiles.fits',
                   author='jwst_reffiles',
                   description='Super Bias Image',
                   pedigree='GROUND',
                   useafter='2000-01-01T00:00:00',
                   history='',
                   fastaxis=-1,
                   slowaxis=2,
                   substrt1=1,
                   substrt2=1,
                   filenames=[]):
    """Saves a CRDS-formatted superbias reference file.

    Parameters
    ----------
    superbias : numpy.ndarray
        The 2D superbias image.

    error : numpy.ndarray
        The 2D superbias error image.

    dq : numpy.ndarray
        The 2D superbias data quality image.

    instrument : str
        CRDS-required instrument for which to use this reference file.

    detector : str
        CRDS-required detector for which to use this reference file.

    subarray : str
        CRDS-required subarray for which to use this reference file.

    readpatt : str
        CRDS-required read pattern for which to use this reference file.

    outfile : str
        Name of the CRDS-formatted superbias reference file to save the final
        superbias map to.

    author : str
        CRDS-required name of the reference file author, to be placed in the
        reference file header.

    description : str
        CRDS-required description of the reference file, to be placed in the
        reference file header.

    pedigree : str
        CRDS-required pedigree of the data used to create the reference file.

    useafter : str
        CRDS-required date of the earliest data with which this reference file
        should be used (e.g. '2019-04-01T00:00:00').

    history : str
        CRDS-required history section to place in the reference file header.

    fastaxis : int
        CRDS-required fastaxis of the reference file.

    slowaxis : int
        CRDS-required slowaxis of the reference file.

    substrt1 : int
        CRDS-required starting pixel in axis 1 direction.

    substrt2 : int
        CRDS-required starting pixel in axis 2 direction.

    filenames : list
        List of dark current files that were used to generate the reference
        file.
    """

    s = SuperBiasModel()

    s.data = superbias
    s.err = error
    s.dq = dq
    s.dq_def = [(0, 0, 'GOOD', ''), (0, 1, 'DO_NOT_USE', ''),
                (1, 2, 'UNRELIABLE_BIAS', '')]

    s.meta.instrument.name = instrument
    s.meta.instrument.detector = detector
    s.meta.subarray.name = subarray
    s.meta.exposure.readpatt = readpatt
    s.meta.author = author
    s.meta.description = description
    s.meta.pedigree = pedigree
    s.meta.useafter = useafter
    s.meta.subarray.fastaxis = fastaxis
    s.meta.subarray.slowaxis = slowaxis
    s.meta.reftype = 'SUPERBIAS'

    yd, xd = superbias.shape
    s.meta.subarray.xstart = substrt1
    s.meta.subarray.xsize = xd
    s.meta.subarray.ystart = substrt2
    s.meta.subarray.ysize = yd

    package_note = ('This file was created using the superbias.py module '
                    'within the jwst_reffiles package.')
    software_dict = {
        'name': 'jwst_reffiles.superbias.py',
        'author': 'STScI',
        'homepage': 'https://github.com/spacetelescope/jwst_reffiles',
        'version': '0.0.0'
    }
    entry = util.create_history_entry(package_note, software=software_dict)
    s.history.append(entry)

    # Add the list of input files used to create the superbias reference file
    s.history.append('DATA USED:')
    for f in filenames:
        f = os.path.basename(f)
        totlen = len(f)
        div = np.arange(0, totlen, 60)
        for val in div:
            if totlen > (val + 60):
                s.history.append(util.create_history_entry(f[val:val + 60]))
            else:
                s.history.append(util.create_history_entry(f[val:]))

    if history != '':
        s.history.append(util.create_history_entry(history))

    s.save(outfile, overwrite=True)
    print('Final CRDS-formatted superbias map saved to {}'.format(outfile))
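A hypothetical call with synthetic superbias arrays; the signal levels, detector, and file names are placeholders:

# Hypothetical usage; array contents and metadata values are placeholders.
import numpy as np

bias = np.full((2048, 2048), 10000.0, dtype=np.float32)
bias_err = np.full_like(bias, 5.0)
bias_dq = np.zeros(bias.shape, dtype=np.uint32)

save_superbias(bias, bias_err, bias_dq,
               instrument='NIRCAM', detector='NRCA1',
               history='Superbias from the sigma-clipped mean of first-group reads.',
               filenames=['dark_0001_uncal.fits'],
               outfile='nrca1_superbias.fits')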