Example #1
def test_slicing_on_instance_with_parameterless_model():
    """
    Regression test to fix an issue where the indices attached to parameter
    names on a compound model were not handled properly when one or more
    submodels have no parameters.  This was especially evident in slicing.
    """

    p2 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
    p1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
    mapping = Mapping((0, 1, 0, 1))
    offx = Shift(-2, name='x_translation')
    offy = Shift(-1, name='y_translation')
    aff = AffineTransformation2D(matrix=[[1, 2], [3, 4]], name='rotation')
    model = mapping | (p1 & p2) | (offx & offy) | aff

    assert model.param_names == ('c0_0_1', 'c1_0_1', 'c0_1_1',
                                 'c0_0_2', 'c1_0_2', 'c0_1_2',
                                 'offset_3', 'offset_4',
                                 'matrix_5', 'translation_5')
    assert model(1, 2) == (23.0, 53.0)

    m = model[3:]
    assert m.param_names == ('offset_3', 'offset_4', 'matrix_5',
                             'translation_5')
    assert m(1, 2) == (1.0, 1.0)
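A minimal, standalone sketch (not part of the test above) of the naming convention it exercises: each parameter of a compound model is suffixed with the index of the submodel it belongs to, and parameterless submodels such as Mapping still occupy an index.

from astropy.modeling.models import Mapping, Shift

m = Mapping((0,)) | Shift(1) | Shift(2)
# Mapping is submodel 0 and has no parameters, so the names start at _1.
assert m.param_names == ('offset_1', 'offset_2')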
Example #2
def xilam2xy_fit(layout, params):
    """
    Determine polynomial fits of FPA position

    Fits are of degree 4 as a function of slit position and wavelength.
    """
    xi_arr = layout[params['s_colname']]
    lam_arr = layout[params['wave_colname']]
    x_arr = layout[params['x_colname']]
    y_arr = layout[params['y_colname']]

    ## Filter the lists: remove any points with x==0
    ## ..todo: this may not be necessary after sanitising the table
    #good = x != 0
    #xi = xi[good]
    #lam = lam[good]
    #x = x[good]
    #y = y[good]

    # compute the fits
    pinit_x = Polynomial2D(degree=4)
    pinit_y = Polynomial2D(degree=4)
    fitter = fitting.LinearLSQFitter()
    xilam2x = fitter(pinit_x, xi_arr, lam_arr, x_arr)
    xilam2y = fitter(pinit_y, xi_arr, lam_arr, y_arr)

    return xilam2x, xilam2y
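A minimal, self-contained sketch (synthetic data, not from the original module) of the same fit pattern used by xilam2xy_fit: a degree-4 Polynomial2D fitted with LinearLSQFitter reproduces an exactly polynomial target to numerical precision.

import numpy as np
from astropy.modeling import fitting
from astropy.modeling.models import Polynomial2D

# Synthetic slit positions, wavelengths and focal-plane x coordinates.
xi, lam = np.meshgrid(np.linspace(0, 1, 21), np.linspace(1.0, 2.5, 21))
x_true = 1.0 + 2.0 * xi - 0.5 * lam + 0.3 * xi * lam

fitter = fitting.LinearLSQFitter()
xilam2x = fitter(Polynomial2D(degree=4), xi, lam, x_true)
assert np.allclose(xilam2x(xi, lam), x_true, atol=1e-8)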
Example #3
def v2v3_model(from_sys, to_sys, par, angle):
    """
    Creates an astropy.modeling.Model object
    for the undistorted ("ideal") to V2V3 coordinate translation
    """
    if from_sys != 'v2v3' and to_sys != 'v2v3':
        raise ValueError("This function is designed to generate the transformation either to or from V2V3.")

    # Cast the transform functions as 1st order polynomials
    xc = {}
    yc = {}
    if to_sys == 'v2v3':
        xc['c1_0'] = par * np.cos(angle)
        xc['c0_1'] = np.sin(angle)
        yc['c1_0'] = (0.-par) * np.sin(angle)
        yc['c0_1'] = np.cos(angle)

    if from_sys == 'v2v3':
        xc['c1_0'] = par * np.cos(angle)
        xc['c0_1'] = par * (0. - np.sin(angle))
        yc['c1_0'] = np.sin(angle)
        yc['c0_1'] = np.cos(angle)

    #0,0 coeff should never be used.
    xc['c0_0'] = 0
    yc['c0_0'] = 0

    xmodel = Polynomial2D(1, **xc)
    ymodel = Polynomial2D(1, **yc)

    return xmodel, ymodel
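A quick check (a sketch, assuming par is the usual +/-1 parity flag) that the two first-order polynomials built for to_sys='v2v3' reproduce the corresponding 2x2 rotation/flip matrix.

import numpy as np

angle, par = np.radians(30.0), -1
xm, ym = v2v3_model('ideal', 'v2v3', par, angle)

x, y = 10.0, 4.0
mat = np.array([[par * np.cos(angle), np.sin(angle)],
                [-par * np.sin(angle), np.cos(angle)]])
assert np.allclose(mat @ np.array([x, y]), [xm(x, y), ym(x, y)])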
Example #4
def deriv_polynomial2d(poly):
    '''Derivatives (gradient) of a Polynomial2D model

    Parameters
    ----------
    poly : astropy.modeling.models.Polynomial2D
        Polynomial whose gradient is required.

    Returns
    -------
    gradient : tuple of Polynomial2D
        Partial derivatives (d/dx, d/dy) of poly.
    '''
    import re
    from astropy.modeling.models import Polynomial2D
    degree = poly.degree
    dpoly_dx = Polynomial2D(degree=degree - 1)
    dpoly_dy = Polynomial2D(degree=degree - 1)
    regexp = re.compile(r'c(\d+)_(\d+)')
    for pname in poly.param_names:
        # analyse the name
        match = regexp.match(pname)
        i = int(match.group(1))
        j = int(match.group(2))
        cij = getattr(poly, pname)
        pname_x = "c%d_%d" % (i - 1, j)
        pname_y = "c%d_%d" % (i, j - 1)
        setattr(dpoly_dx, pname_x, i * cij)
        setattr(dpoly_dy, pname_y, j * cij)

    return dpoly_dx, dpoly_dy
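A quick sanity check (a sketch, not part of the original module): for p(x, y) = 1 + 2x + 3x**2 - 1.5y the gradient should be (2 + 6x, -1.5).

import numpy as np
from astropy.modeling.models import Polynomial2D

p = Polynomial2D(2, c0_0=1, c1_0=2, c2_0=3, c0_1=-1.5)
dpdx, dpdy = deriv_polynomial2d(p)
assert np.isclose(dpdx(2.0, 7.0), 2 + 6 * 2.0)
assert np.isclose(dpdy(2.0, 7.0), -1.5)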
Example #5
    def test_linear_2d_common_weights(self):
        model = Polynomial2D(1)
        fitter = LinearLSQFitter()
        model = fitter(model, self.x2, self.y2, self.z2, weights=self.w2)
        assert_allclose(model.c0_0, 1., atol=1e-12)
        assert_allclose(model.c1_0, -0.1, atol=1e-12)
        assert_allclose(model.c0_1, 0.2, atol=1e-12)
Example #6
def test_model_set_axis_outputs():
    fitter = LinearLSQFitter()
    model_set = Polynomial2D(1, n_models=2, model_set_axis=2)
    y2, x2 = np.mgrid[:5, :5]
    # z = np.moveaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 2)
    z = np.rollaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 3)
    model = fitter(model_set, x2, y2, z)
    res = model(x2, y2, model_set_axis=False)
    assert z.shape == res.shape

    # Test initializing with integer model_set_axis
    # and evaluating with a different model_set_axis
    model_set = Polynomial1D(1,
                             c0=[1, 2],
                             c1=[2, 3],
                             n_models=2,
                             model_set_axis=0)
    y0 = model_set(xx)
    y1 = model_set(xx.T, model_set_axis=1)
    assert_allclose(y0[0], y1[:, 0])
    assert_allclose(y0[1], y1[:, 1])

    model_set = Polynomial1D(1,
                             c0=[[1, 2]],
                             c1=[[2, 3]],
                             n_models=2,
                             model_set_axis=1)
    y0 = model_set(xx.T)
    y1 = model_set(xx, model_set_axis=0)
    assert_allclose(y0[:, 0], y1[0])
    assert_allclose(y0[:, 1], y1[1])
    with pytest.raises(ValueError):
        model_set(x)
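x and xx above are fixtures from the surrounding test module; a minimal stand-in (a sketch, not the original fixtures) illustrating the model_set_axis=0 behaviour they rely on:

import numpy as np
from astropy.modeling.models import Polynomial1D

pset = Polynomial1D(1, c0=[1, 2], c1=[2, 3], n_models=2, model_set_axis=0)
xx = np.array([[0., 1., 2., 3.],
               [0., 1., 2., 3.]])
# Row 0 is evaluated by 1 + 2x, row 1 by 2 + 3x.
print(pset(xx))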
Example #7
def to_model(coeffs, degree=5):
    """
    Creates an astropy.modeling.Model object
    Parameters
    ----------
    coeffs : array like
        Coefficients from the ISIM transformations file.
    degree : int
        Degree of polynomial.
        Default is 5 as in the ISIM file but many of the polynomials are of
        a smaller degree.
    Returns
    -------
    poly : astropy.modeling.Polynomial2D
        Polynomial model transforming one coordinate (x or y) between two systems.
    """

    #map Colin's coefficients into the order expected by Polynomial2D
    c = {}
    for cname in coeffs.colnames:
        siaf_i = int(cname[-2])
        siaf_j = int(cname[-1])
        name = 'c{0}_{1}'.format(siaf_i - siaf_j, siaf_j)
        c[name] = coeffs[cname].data[0]

    #0,0 coefficient should not be used, according to Colin's TR
    #JWST-STScI-001550
    c['c0_0'] = 0

    return Polynomial2D(degree, **c)
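A small illustration (with hypothetical column names) of the index mapping above: the trailing digits (i, j) of an ISIM-style column name become the Polynomial2D coefficient c{i-j}_{j}.

# Hypothetical ISIM-style column names ending in the digits (i, j).
for cname in ['A20', 'A21', 'A22']:
    siaf_i, siaf_j = int(cname[-2]), int(cname[-1])
    print(cname, '->', 'c{0}_{1}'.format(siaf_i - siaf_j, siaf_j))
# A20 -> c2_0, A21 -> c1_1, A22 -> c0_2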
Example #8
    def test_linear_2d_separate_weights_axis_2(self):
        model = Polynomial2D(1, model_set_axis=2)
        fitter = LinearLSQFitter()
        model = fitter(model, self.x2, self.y2, np.rollaxis(self.z2, 0, 3),
                       weights=self.w2[..., np.newaxis])
        assert_allclose(model.c0_0, 1., atol=1e-12)
        assert_allclose(model.c1_0, -0.1, atol=1e-12)
        assert_allclose(model.c0_1, 0.2, atol=1e-12)
Example #9
    def fit(cls, xin, yin, xout, degree=4):
        """
        Determine polynomial fits
        """
        pinit = Polynomial2D(degree=degree)
        fitter = fitting.LinearLSQFitter()
        fit = fitter(pinit, xin, yin, xout)
        return Transform2D(fit2matrix(fit))
Example #10
def xy2xilam_fit(layout, params):
    """
    Determine polynomial fits of wavelength/slit position

    Fits are of degree 4 as a function of focal plane position
    """

    xi_arr = layout[params['s_colname']]
    lam_arr = layout[params['wave_colname']]
    x_arr = layout[params['x_colname']]
    y_arr = layout[params['y_colname']]

    pinit_xi = Polynomial2D(degree=4)
    pinit_lam = Polynomial2D(degree=4)
    fitter = fitting.LinearLSQFitter()
    xy2xi = fitter(pinit_xi, x_arr, y_arr, xi_arr)
    xy2lam = fitter(pinit_lam, x_arr, y_arr, lam_arr)

    return xy2xi, xy2lam
Example #11
def test_model_axis_2():
    """
    Test that a model initialized with model_set_axis=2
    can be evaluated with model_set_axis=False.
    """
    p1 = Polynomial1D(1,
                      c0=[[[1, 2, 3]]],
                      c1=[[[10, 20, 30]]],
                      n_models=3,
                      model_set_axis=2)
    t1 = Polynomial1D(1, c0=1, c1=10)
    t2 = Polynomial1D(1, c0=2, c1=20)
    t3 = Polynomial1D(1, c0=3, c1=30)

    with pytest.raises(ValueError):
        p1(x)

    with pytest.raises(ValueError):
        p1(xx)

    y = p1(x, model_set_axis=False)
    assert y.shape == (1, 4, 3)
    assert_allclose(y[:, :, 0].flatten(), t1(x))
    assert_allclose(y[:, :, 1].flatten(), t2(x))
    assert_allclose(y[:, :, 2].flatten(), t3(x))

    p2 = Polynomial2D(1,
                      c0_0=[[[0, 1, 2]]],
                      c0_1=[[[3, 4, 5]]],
                      c1_0=[[[5, 6, 7]]],
                      n_models=3,
                      model_set_axis=2)
    t1 = Polynomial2D(1, c0_0=0, c0_1=3, c1_0=5)
    t2 = Polynomial2D(1, c0_0=1, c0_1=4, c1_0=6)
    t3 = Polynomial2D(1, c0_0=2, c0_1=5, c1_0=7)

    assert p2.c0_0.shape == ()
    y = p2(x, x, model_set_axis=False)
    assert y.shape == (1, 4, 3)
    # These are columns along the 2nd axis.
    assert_allclose(y[:, :, 0].flatten(), t1(x, x))
    assert_allclose(y[:, :, 1].flatten(), t2(x, x))
    assert_allclose(y[:, :, 2].flatten(), t3(x, x))
Example #12
File: wcs.py  Project: chris-simpson/gwcs
def _fit_2D_poly(ntransform, npoints, degree, max_error,
                 xin, yin, xout, yout,
                 xind, yind, xoutd, youtd,
                 verbose=False):
    """
    Fit a pair of ordinary 2D polynomials to the supplied transform.

    """
    llsqfitter = LinearLSQFitter()

    # The case of one pass with the specified polynomial degree
    if degree:
        deglist = [degree]
    else:
        deglist = range(10)
    prev_max_error = float(np.inf)
    if verbose:
        print(f'maximum_specified_error: {max_error}')
    for deg in deglist:
        poly_x = Polynomial2D(degree=deg)
        poly_y = Polynomial2D(degree=deg)
        fit_poly_x = llsqfitter(poly_x, xin, yin, xout)
        fit_poly_y = llsqfitter(poly_y, xin, yin, yout)
        max_resid = _compute_distance_residual(xout, yout,
                                               fit_poly_x(xin, yin),
                                               fit_poly_y(xin, yin))
        if max_resid > prev_max_error:
            raise RuntimeError('Failed to achieve required error tolerance')
        if verbose:
            print(f'Degree = {deg}, max_resid = {max_resid}')
        if max_resid < max_error:
            # Check to see if double sampling meets error requirement.
            max_resid = _compute_distance_residual(xoutd, youtd,
                                                   fit_poly_x(xind, yind),
                                                   fit_poly_y(xind, yind))
            if verbose:
                print(f'Double sampling check: maximum residual={max_resid}')
            if max_resid < max_error:
                if verbose:
                    print('terminating condition met')
                break
    return fit_poly_x, fit_poly_y, max_resid
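_compute_distance_residual is defined elsewhere in wcs.py; a minimal sketch of what it is assumed to return, namely the largest Euclidean offset between the reference points and the fitted ones:

import numpy as np

def _compute_distance_residual(xref, yref, xfit, yfit):
    # Maximum Euclidean distance between reference and fitted positions.
    return float(np.max(np.hypot(np.asarray(xref) - xfit, np.asarray(yref) - yfit)))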
Example #13
def _xiy2xlam_fit(layout, params):
    """Determine polynomial fits of wavelength/slit position

    Fits are of degree 4 as a function of focal plane position
    """

    # These are helper functions to allow fitting of left/right edges
    # for the purpose of checking whether a trace is on a chip or not.

    xi_arr = layout[params['s_colname']]
    lam_arr = layout[params['wave_colname']]
    x_arr = layout[params['x_colname']]
    y_arr = layout[params['y_colname']]

    pinit_x = Polynomial2D(degree=4)
    pinit_lam = Polynomial2D(degree=4)
    fitter = fitting.LinearLSQFitter()
    xiy2x = fitter(pinit_x, xi_arr, y_arr, x_arr)
    xiy2lam = fitter(pinit_lam, xi_arr, y_arr, lam_arr)
    return xiy2x, xiy2lam
Example #14
def test_linear_fit_2d_model_set_errors():

    init_model = Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
    x = np.arange(10)
    y = np.arange(10)
    z = init_model(x, y, model_set_axis=False)

    fitter = LinearLSQFitter()
    with pytest.raises(ValueError):
        fitter(init_model, x[:5], y, z)
    with pytest.raises(ValueError):
        fitter(init_model, x, y, z[:, :5])
Example #15
def test_linear_fit_2d_model_set_common_weight():
    init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
                              n_models=2,
                              fixed={'c1_0': True, 'c0_1': True})

    x, y = np.mgrid[0:5, 0:5]
    zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])

    fitter = LinearLSQFitter()
    fitted_model = fitter(init_model, x, y, zz, weights=np.ones((5, 5)))

    assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
                    atol=1e-14)
Example #16
def v2v3_model(from_sys, to_sys, par, angle):
    """
    Creates an astropy.modeling.Model object
    for the undistorted ("ideal") to V2V3 coordinate translation
    """
    if from_sys != 'v2v3' and to_sys != 'v2v3':
        print(
            "This function is designed to generate the transformation either to or from V2V3."
        )
        sys.exit()

    #cast the transform functions as 1st order polynomials
    xc = {}
    yc = {}
    if to_sys == 'v2v3':
        xc['c1_0'] = par * np.cos(angle)
        xc['c0_1'] = np.sin(angle)
        yc['c1_0'] = (0. - par) * np.sin(angle)
        yc['c0_1'] = np.cos(angle)

    if from_sys == 'v2v3':
        xc['c1_0'] = par * np.cos(angle)
        xc['c0_1'] = par * (0. - np.sin(angle))
        yc['c1_0'] = np.sin(angle)
        yc['c0_1'] = np.cos(angle)

    #0,0 coeff should never be used.
    xc['c0_0'] = 0
    yc['c0_0'] = 0

    #print("coeffs for v2v3 transform:")
    #for key in xc:
    #    print("{} {}".format(key,xc[key]))
    #sys.exit()

    xmodel = Polynomial2D(1, **xc)
    ymodel = Polynomial2D(1, **yc)

    return xmodel, ymodel
Example #17
def test_LabelMapperRange(tmpdir):
    m = []
    for i in np.arange(9) * .1:
        c0_0, c1_0, c0_1, c1_1 = np.ones((4, )) * i
        m.append(Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1))
    keys = np.array([[4.88, 5.64], [5.75, 6.5], [6.67, 7.47], [7.7, 8.63],
                     [8.83, 9.96], [10.19, 11.49], [11.77, 13.28],
                     [13.33, 15.34], [15.56, 18.09]])
    rmapper = {}
    for k, v in zip(keys, m):
        rmapper[tuple(k)] = v
    sel = selector.LabelMapperRange(('x', 'y'),
                                    rmapper,
                                    inputs_mapping=Mapping((0, ), n_inputs=2))
    tree = {'model': sel}
    helpers.assert_roundtrip_tree(tree, tmpdir, extensions=GWCSExtension())
Example #18
    def test_derivative_of_2D_polynomial_equal_to_analytical_derivative(self):
        from astropy.modeling.models import Polynomial2D

        ximg, yimg = np.meshgrid(np.linspace(-1, 1, 101),
                                 np.linspace(-1, 1, 101))
        poly = Polynomial2D(2, c0_0=1, c1_0=2, c2_0=3,
                            c0_1=-1.5, c0_2=0.4, c1_1=-2)
        # Expected values
        y_x = 2 + 6 * ximg - 2 * yimg
        y_y = -1.5 + 0.8 * yimg - 2 * ximg

        dpoly_x, dpoly_y = deriv_polynomial2d(poly)
        # Computed values
        y_x_test = dpoly_x(ximg, yimg)
        y_y_test = dpoly_y(ximg, yimg)

        assert np.allclose(y_x, y_x_test)
        assert np.allclose(y_y, y_y_test)
Example #19
    def test_01(self):
        '''Test simmetis.utils.deriv_polynomial2d'''
        from astropy.modeling.models import Polynomial2D

        ximg, yimg = np.meshgrid(np.linspace(-1, 1, 101),
                                 np.linspace(-1, 1, 101))
        poly = Polynomial2D(2,
                            c0_0=1,
                            c1_0=2,
                            c2_0=3,
                            c0_1=-1.5,
                            c0_2=0.4,
                            c1_1=-2)
        # Expected values
        y_x = 2 + 6 * ximg - 2 * yimg
        y_y = -1.5 + 0.8 * yimg - 2 * ximg

        dpoly_x, dpoly_y = deriv_polynomial2d(poly)
        # Computed values
        y_x_test = dpoly_x(ximg, yimg)
        y_y_test = dpoly_y(ximg, yimg)

        assert np.allclose(y_x, y_x_test)
        assert np.allclose(y_y, y_y_test)
Example #20
def test_invalid_operands():
    """
    Test that certain operators do not work with models whose inputs/outputs do
    not match up correctly.
    """

    with pytest.raises(ModelDefinitionError):
        Rotation2D(90) | Gaussian1D(1, 0, 0.1)

    with pytest.raises(ModelDefinitionError):
        Rotation2D(90) + Gaussian1D(1, 0, 0.1)


@pytest.mark.parametrize(
    'poly',
    [Chebyshev2D(1, 2), Polynomial2D(2),
     Legendre2D(1, 2)])
def test_compound_with_polynomials_2d(poly):
    """
    Tests that polynomials are scaled when used in compound models.
    Issue #3699
    """
    poly.parameters = [1, 2, 3, 4, 1, 2]
    shift = Shift(3)
    model = poly | shift
    x, y = np.mgrid[:20, :37]
    result_compound = model(x, y)
    result = shift(poly(x, y))
    assert_allclose(result, result_compound)

Example #21
def test_invalid_operands():
    """
    Test that certain operators do not work with models whose inputs/outputs do
    not match up correctly.
    """

    with pytest.raises(ModelDefinitionError):
        Rotation2D(90) | Gaussian1D(1, 0, 0.1)

    with pytest.raises(ModelDefinitionError):
        Rotation2D(90) + Gaussian1D(1, 0, 0.1)


@pytest.mark.parametrize('poly', [
    Chebyshev2D(1, 2),
    Polynomial2D(2),
    Legendre2D(1, 2),
    Chebyshev1D(5),
    Legendre1D(5),
    Polynomial1D(5)
])
def test_compound_with_polynomials(poly):
    """
    Tests that polynomials are scaled when used in compound models.
    Issue #3699
    """
    poly.parameters = [1, 2, 3, 4, 1, 2]
    shift = Shift(3)
    model = poly | shift
    x, y = np.mgrid[:20, :37]
    result_compound = model(x, y)
    result = shift(poly(x, y))
    assert_allclose(result, result_compound)
Example #22
def create_wfc3_distortion(detector, outname, sci_pupil,
                             sci_subarr, sci_exptype, history_entry, filter):
    """
    Create an asdf reference file with all distortion components for the WFC3 imager.
    NOTE: The IDT has not provided any distortion information. The files are constructed
    using ISIM transformations provided/(computed?) by the TEL team which they use to
    create the SIAF file.
    These reference files should be replaced when/if the IDT provides us with distortion.
    Parameters
    ----------
    detector : str
        NRCB1, NRCB2, NRCB3, NRCB4, NRCB5, NRCA1, NRCA2, NRCA3, NRCA4, NRCA5
    aperture : str
        Name of the aperture/subarray. (e.g. FULL, SUB160, SUB320, SUB640, GRISM_F322W2)
    outname : str
        Name of output file.
    Examples
    --------
    """
    # Download WFC3 Image Distortion File
    from astropy.utils.data import download_file
    fn = download_file('https://hst-crds.stsci.edu/unchecked_get/references/hst/w3m18525i_idc.fits', cache=True)
    wfc3_distortion_file = fits.open(fn)
    wfc3_filter_info = wfc3_distortion_file[1].data[list(wfc3_distortion_file[1].data['FILTER']).index(filter)]
    
    
    degree = 4  # WFC3 Distortion is fourth degree
    
    # From Bryan Hilbert:
    #   The parity term is just an indicator of the relationship between the detector y axis and the “science” y axis.
    #   A parity of -1 means that the y axes of the two systems run in opposite directions... A value of 1 indicates no flip.
    # From Colin Cox:
    #   ... for WFC3 it is always -1 so maybe people gave up mentioning it.
    parity = -1
    
    #full_aperture = detector + '_' + aperture

    # Get Siaf instance for detector/aperture
    #inst_siaf = pysiaf.Siaf('nircam')
    #siaf = inst_siaf[full_aperture]

    # *****************************************************
    # "Forward' transformations. science --> ideal --> V2V3
    xcoeffs, ycoeffs = get_distortion_coeffs(degree, wfc3_filter_info)

    sci2idlx = Polynomial2D(degree, **xcoeffs)
    sci2idly = Polynomial2D(degree, **ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    idl2v2v3x, idl2v2v3y = v2v3_model('ideal', 'v2v3', parity, np.radians(wfc3_distortion_file[1].data[wfc3_distortion_file[1].data['FILTER'] == filter]['THETA'][0]))

    '''
    # *****************************************************
    # 'Reverse' transformations. V2V3 --> ideal --> science
    xcoeffs, ycoeffs = get_distortion_coeffs('Idl2Sci', siaf)

    idl2scix = Polynomial2D(degree, **xcoeffs)
    idl2sciy = Polynomial2D(degree, **ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    v2v32idlx, v2v32idly = v2v3_model('v2v3', 'ideal', parity, np.radians(wfc3_distortion_file['THETA']))
    '''

    # Now create a compound model for each with the appropriate inverse
    # Inverse polynomials were removed in favor of using GWCS' numerical inverse capabilities
    sci2idl = Mapping([0, 1, 0, 1]) | sci2idlx & sci2idly
    #sci2idl.inverse = Mapping([0, 1, 0, 1]) | idl2scix & idl2sciy

    idl2v2v3 = Mapping([0, 1, 0, 1]) | idl2v2v3x & idl2v2v3y
    #idl2v2v3.inverse = Mapping([0, 1, 0, 1]) | v2v32idlx & v2v32idly

    # Now string the models together to make a single transformation

    # We also need
    # to account for the difference of 1 between the SIAF
    # coordinate values (indexed to 1) and python (indexed to 0).
    # Nadia said that this shift should be present in the
    # distortion reference file.

    core_model = sci2idl# | idl2v2v3

    # Now add in the shifts to create the full model
    # including the shift to go from 0-indexed python coords to
    # 1-indexed

    # Find the distance between (0,0) and the reference location
    xshift = Shift(wfc3_filter_info['XREF'])
    yshift = Shift(wfc3_filter_info['YREF'])
    
    # Finally, we need to shift by the v2,v3 value of the reference
    # location in order to get to absolute v2,v3 coordinates
    v2shift = Shift(wfc3_filter_info['V2REF'])
    v3shift = Shift(wfc3_filter_info['V3REF'])
    
    # SIAF coords
    index_shift = Shift(1)
    model = index_shift & index_shift | xshift & yshift | core_model | v2shift & v3shift

    # Since the inverse of all model components are now defined,
    # the total model inverse is also defined automatically

    # Save using the DistortionModel datamodel
    d = DistortionModel(model=model, input_units=u.pix,
                        output_units=u.arcsec)

    #Populate metadata

    # Keyword values in science data to which this file should
    # be applied
    p_pupil = ''
    for p in sci_pupil:
        p_pupil = p_pupil + p + '|'

    p_subarr = ''
    for p in sci_subarr:
        p_subarr = p_subarr + p + '|'

    p_exptype = ''
    for p in sci_exptype:
        p_exptype = p_exptype + p + '|'

    d.meta.instrument.p_pupil = p_pupil
    d.meta.subarray.p_subarray = p_subarr
    d.meta.exposure.p_exptype = p_exptype

    # metadata describing the reference file itself
    d.meta.title = "WFC3 Distortion"
    d.meta.instrument.name = "WFC3"
    d.meta.instrument.module = detector[-2]
    
    numdet = detector[-1]
    d.meta.instrument.channel = "LONG" if numdet == '5' else "SHORT"
    # In the reference file headers, we need to switch NRCA5 to
    # NRCALONG, and same for module B.
    d.meta.instrument.detector = (detector[0:4] + 'LONG') if numdet == '5' else detector
    
    d.meta.telescope = 'HST'
    d.meta.subarray.name = 'FULL'
    d.meta.pedigree = 'GROUND'
    d.meta.reftype = 'DISTORTION'
    d.meta.author = 'D. Nguyen'
    d.meta.litref = "https://github.com/spacetelescope/jwreftools"
    d.meta.description = "Distortion model from SIAF coefficients in pysiaf version 0.6.1"
    #d.meta.exp_type = exp_type
    d.meta.useafter = "2014-10-01T00:00:00"

    # To be ready for the future where we will have filter-dependent solutions
    d.meta.instrument.filter = 'N/A'

    # Create initial HISTORY ENTRY
    sdict = {'name': 'nircam_distortion_reffiles_from_pysiaf.py',
             'author': 'B.Hilbert',
             'homepage': 'https://github.com/spacetelescope/jwreftools',
             'version': '0.8'}

    entry = util.create_history_entry(history_entry, software=sdict)
    d.history = [entry]

    #Create additional HISTORY entries
    #entry2 = util.create_history_entry(history_2)
    #d.history.append(entry2)

    d.save(outname)
    print("Output saved to {}".format(outname))
Example #23
def create_nircam_distortion(detector,
                             aperture,
                             outname,
                             sci_pupil,
                             sci_subarr,
                             sci_exptype,
                             history_entry,
                             author=None,
                             descrip=None,
                             pedigree=None,
                             useafter=None,
                             dist_coeffs_file=None,
                             siaf_xml_file=None):
    """
    Create an asdf reference file with all distortion components for the NIRCam imager.

    NOTE: The IDT has not provided any distortion information. The files are constructed
    using ISIM transformations provided/(computed?) by the TEL team which they use to
    create the SIAF file.
    These reference files should be replaced when/if the IDT provides us with distortion.

    Parameters
    ----------
    detector : str
        NRCB1, NRCB2, NRCB3, NRCB4, NRCB5, NRCA1, NRCA2, NRCA3, NRCA4, NRCA5

    aperture : str
        Name of the aperture/subarray. (e.g. FULL, SUB160, SUB320, SUB640, GRISM_F322W2)

    outname : str
        Name of output file.
    siaf_xml_file : str
        Name of SIAF xml file to use in place of the default SIAF version from pysiaf.
        If None, the default version in pysiaf will be used.

    sci_pupil : list
        Pupil wheel values for which this distortion solution applies

    sci_subarr : list
        List of subarray/aperture names to which this distortion solution applies

    sci_exptype : list
        List of exposure types to which this distortion solution applies

    history_entry : str
        Text to be added as a HISTORY entry in the output reference file

    author : str
        Value to place in the output file's Author metadata entry

    descrip : str
        Text to place in the output file's DESCRIP header keyword

    pedigree : str
        Value to place in the output file's PEDIGREE header keyword

    useafter : str
        Value to place in the output file's USEAFTER header keyword (e.g. "2014-10-01T00:00:01")
    dist_coeffs_file : str
        Name of ascii file (nominally output by jwst_fpa package) containing distortion
        coefficients. If this is provided, the coefficients in this file are used, rather
        than those in pysiaf.

    Examples
    --------

    """
    degree = 5  # distortion in pysiaf is a 5th order polynomial
    numdet = detector[-1]
    module = detector[-2]
    channel = 'SHORT'
    if numdet == '5':
        channel = 'LONG'

    full_aperture = detector + '_' + aperture

    # Get Siaf instance for detector/aperture
    if siaf_xml_file is None:
        print('Using default SIAF version in pysiaf.')
        inst_siaf = pysiaf.Siaf('nircam')
    else:
        print(f'SIAF to be loaded from {siaf_xml_file}...')
        inst_siaf = pysiaf.Siaf(filename=siaf_xml_file, instrument='nircam')

    siaf = inst_siaf[full_aperture]

    # Find the distance between (0,0) and the reference location
    xshift, yshift = get_refpix(inst_siaf, full_aperture)

    # *****************************************************
    # If the user provides files containing distortion coefficients
    # (as output by the jwst_fpa package), use those rather than
    # retrieving coefficients from siaf.
    if dist_coeffs_file is not None:
        coeff_tab = read_distortion_coeffs_file(dist_coeffs_file)
        xcoeffs = convert_distortion_coeffs_table(coeff_tab, 'Sci2IdlX')
        ycoeffs = convert_distortion_coeffs_table(coeff_tab, 'Sci2IdlY')
        inv_xcoeffs = convert_distortion_coeffs_table(coeff_tab, 'Idl2SciX')
        inv_ycoeffs = convert_distortion_coeffs_table(coeff_tab, 'Idl2SciY')
    elif dist_coeffs_file is None:
        xcoeffs, ycoeffs = get_distortion_coeffs('Sci2Idl', siaf)
        inv_xcoeffs, inv_ycoeffs = get_distortion_coeffs('Idl2Sci', siaf)

    # V3IdlYAngle and V2Ref, V3Ref should always be taken from the latest version
    # of SIAF, rather than the output of jwst_fpa. Separate FGS/NIRISS analyses must
    # be done in order to modify these values.
    v3_ideal_y_angle = siaf.V3IdlYAngle * np.pi / 180.

    # *****************************************************
    # "Forward' transformations. science --> ideal --> V2V3
    #label = 'Sci2Idl'
    ##from_units = 'distorted pixels'
    ##to_units = 'arcsec'

    #xcoeffs, ycoeffs = get_distortion_coeffs(label, siaf)

    sci2idlx = Polynomial2D(degree, **xcoeffs)
    sci2idly = Polynomial2D(degree, **ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    parity = siaf.VIdlParity
    #v3_ideal_y_angle = siaf.V3IdlYAngle * np.pi / 180.
    idl2v2v3x, idl2v2v3y = v2v3_model('ideal', 'v2v3', parity,
                                      v3_ideal_y_angle)

    # Finally, we need to shift by the v2,v3 value of the reference
    # location in order to get to absolute v2,v3 coordinates
    v2shift, v3shift = get_v2v3ref(siaf)

    # *****************************************************
    # 'Reverse' transformations. V2V3 --> ideal --> science
    #label = 'Idl2Sci'
    ##from_units = 'arcsec'
    ##to_units = 'distorted pixels'

    #xcoeffs, ycoeffs = get_distortion_coeffs(label, siaf)

    idl2scix = Polynomial2D(degree, **inv_xcoeffs)
    idl2sciy = Polynomial2D(degree, **inv_ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    #parity = siaf.VIdlParity
    #v3_ideal_y_angle = siaf.V3IdlYAngle * np.pi / 180.
    v2v32idlx, v2v32idly = v2v3_model('v2v3', 'ideal', parity,
                                      v3_ideal_y_angle)

    ##"Forward' transformations. science --> ideal --> V2V3
    #sci2idlx, sci2idly, sciunit, idlunit = read_siaf_table.get_siaf_transform(coefffile,full_aperture,'science','ideal', 5)
    #idl2v2v3x, idl2v2v3y = read_siaf_table.get_siaf_v2v3_transform(coefffile,full_aperture,from_system='ideal')

    ##'Reverse' transformations. V2V3 --> ideal --> science
    #v2v32idlx, v2v32idly = read_siaf_table.get_siaf_v2v3_transform(coefffile,full_aperture,to_system='ideal')
    #idl2scix, idl2sciy, idlunit, sciunit = read_siaf_table.get_siaf_transform(coefffile,full_aperture,'ideal','science', 5)

    # Now create a compound model for each with the appropriate inverse
    sci2idl = Mapping([0, 1, 0, 1]) | sci2idlx & sci2idly
    sci2idl.inverse = Mapping([0, 1, 0, 1]) | idl2scix & idl2sciy

    idl2v2v3 = Mapping([0, 1, 0, 1]) | idl2v2v3x & idl2v2v3y
    idl2v2v3.inverse = Mapping([0, 1, 0, 1]) | v2v32idlx & v2v32idly

    # Now string the models together to make a single transformation

    # We also need
    # to account for the difference of 1 between the SIAF
    # coordinate values (indexed to 1) and python (indexed to 0).
    # Nadia said that this shift should be present in the
    # distortion reference file.

    core_model = sci2idl | idl2v2v3

    # Now add in the shifts to create the full model
    # including the shift to go from 0-indexed python coords to
    # 1-indexed

    # SIAF coords
    index_shift = Shift(1)
    model = index_shift & index_shift | xshift & yshift | core_model | v2shift & v3shift

    # Since the inverse of all model components are now defined,
    # the total model inverse is also defined automatically

    # In the reference file headers, we need to switch NRCA5 to
    # NRCALONG, and same for module B.
    if detector[-1] == '5':
        detector = detector[0:4] + 'LONG'

    # Save using the DistortionModel datamodel
    d = DistortionModel(model=model, input_units=u.pix, output_units=u.arcsec)

    #Populate metadata

    # Keyword values in science data to which this file should
    # be applied
    p_pupil = ''
    for p in sci_pupil:
        p_pupil = p_pupil + p + '|'

    p_subarr = ''
    for p in sci_subarr:
        p_subarr = p_subarr + p + '|'

    p_exptype = ''
    for p in sci_exptype:
        p_exptype = p_exptype + p + '|'

    d.meta.instrument.p_pupil = p_pupil
    d.meta.subarray.p_subarray = p_subarr
    d.meta.exposure.p_exptype = p_exptype

    #d.meta.instrument.p_pupil = "CLEAR|F162M|F164N|F323N|F405N|F470N|"
    #d.meta.p_subarray = "FULL|SUB64P|SUB160|SUB160P|SUB320|SUB400P|SUB640|SUB32TATS|SUB32TATSGRISM|SUB8FP1A|SUB8FP1B|SUB96DHSPILA|SUB96DHSPILB|SUB64FP1A|SUB64FP1B|"
    #d.meta.exposure.p_exptype = "NRC_IMAGE|NRC_TSIMAGE|NRC_FLAT|NRC_LED|NRC_WFSC|"

    # metadata describing the reference file itself
    d.meta.title = "NIRCam Distortion"
    d.meta.instrument.name = "NIRCAM"
    d.meta.instrument.module = module
    d.meta.instrument.channel = channel
    d.meta.instrument.detector = detector
    d.meta.telescope = 'JWST'
    d.meta.subarray.name = 'FULL'

    if pedigree is None:
        d.meta.pedigree = 'GROUND'
    else:
        if pedigree.upper() not in ['DUMMY', 'GROUND', 'FLIGHT']:
            raise ValueError("Bad PEDIGREE value.")
        d.meta.pedigree = pedigree.upper()

    d.meta.reftype = 'DISTORTION'

    if author is None:
        author = "B. Hilbert"
    d.meta.author = author

    d.meta.litref = "https://github.com/spacetelescope/nircam_calib/nircam_calib/reffile_creation/pipeline/distortion/nircam_distortion_reffiles_from_pysiaf.py"

    if descrip is None:
        d.meta.description = "TEST OF UPDATED CODE"
    else:
        d.meta.description = descrip

    #d.meta.exp_type = exp_type
    if useafter is None:
        d.meta.useafter = "2014-10-01T00:00:01"
    else:
        d.meta.useafter = useafter

    # To be ready for the future where we will have filter-dependent solutions
    d.meta.instrument.filter = 'N/A'

    # Create initial HISTORY ENTRY
    sdict = {
        'name': 'nircam_distortion_reffiles_from_pysiaf.py',
        'author': author,
        'homepage': 'https://github.com/spacetelescope/nircam_calib',
        'version': '0.0'
    }

    entry = util.create_history_entry(history_entry, software=sdict)
    d.history = [entry]

    #Create additional HISTORY entries
    #entry2 = util.create_history_entry(history_2)
    #d.history.append(entry2)

    d.save(outname)
    print("Output saved to {}".format(outname))
Example #24
def test_NIRISSBackwardDispersion():
    forward_ymodels = [[
        Polynomial2D(2,
                     c0_0=-1.876215,
                     c1_0=-5.179793e-04,
                     c2_0=2.116366e-08,
                     c0_1=-2.259297e-04,
                     c0_2=-2.502127e-12,
                     c1_1=4.771951e-08),
        Polynomial2D(2,
                     c0_0=-3.089115,
                     c1_0=3.063270e-03,
                     c2_0=-9.786785e-07,
                     c0_1=1.237905e-03,
                     c0_2=-1.510774e-11,
                     c1_1=-5.405480e-09)
    ]]

    forward_xmodels = [[
        Polynomial2D(2,
                     c0_0=63.55173,
                     c1_0=3.846599e-06,
                     c2_0=-7.173816e-10,
                     c0_1=8.158127e-07,
                     c0_2=-1.274281e-09,
                     c1_1=4.098804e-11),
        Polynomial2D(2,
                     c0_0=-331.8532,
                     c1_0=-1.24494e-05,
                     c2_0=4.210112e-10,
                     c0_1=-1.615311e-06,
                     c0_2=6.665276e-09,
                     c1_1=1.43762e-10)
    ]]

    forward_lmodels = [
        Polynomial1D(1, c0=0.75, c1=1.55),
        Polynomial1D(1, c0=0.75, c1=1.55)
    ]

    forward_model = transforms.NIRISSForwardColumnGrismDispersion(
        [1, 2], forward_lmodels, forward_xmodels, forward_ymodels)

    # NirissBackward model uses xmodels, ymodels and invlmodels
    lmodels = [
        Polynomial1D(1, c0=-0.48387097, c1=0.64516129),
        Polynomial1D(1, c0=-0.48387097, c1=0.64516129)
    ]

    model = transforms.NIRISSBackwardGrismDispersion([1, 2],
                                                     lmodels=lmodels,
                                                     xmodels=forward_xmodels,
                                                     ymodels=forward_ymodels)

    wavelength = np.array([[2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3],
                           [
                               0.98553179, 0.98553179, 0.98553179, 0.98553179,
                               0.98553179, 0.98553179, 0.98553179
                           ], [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75],
                           [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75],
                           [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75],
                           [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75],
                           [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75]])

    x0 = 913.7
    y0 = 15.5
    order = 1

    slit = create_slit(forward_model, x0, y0, order)
    slit.meta.wcs.bounding_box = ((910, 916), (12, 18))

    expected_xdx = np.array(
        [[
            641.69045022, 642.69044108, 643.69043194, 644.6904228,
            645.69041366, 646.69040451, 647.69039537
        ],
         [
             923.12589407, 924.12589483, 925.1258956, 926.12589636,
             927.12589712, 928.12589788, 929.12589864
         ],
         [
             973.55464886, 974.55465141, 975.55465395, 976.55465648,
             977.55465902, 978.55466155, 979.55466409
         ],
         [
             973.55464968, 974.55465222, 975.55465476, 976.5546573,
             977.55465984, 978.55466237, 979.5546649
         ],
         [
             973.55465049, 974.55465304, 975.55465557, 976.55465811,
             977.55466065, 978.55466318, 979.55466572
         ],
         [
             973.55465131, 974.55465385, 975.55465639, 976.55465892,
             977.55466146, 978.554664, 979.55466653
         ],
         [
             973.55465211, 974.55465466, 975.55465719, 976.55465973,
             977.55466227, 978.5546648, 979.55466734
         ]])

    expected_ydy = np.array(
        [[
            8.57057227, 8.57137444, 8.57217468, 8.57297302, 8.57376944,
            8.57456394, 8.57535653
        ],
         [
             10.5010401, 10.50075594, 10.50047152, 10.50018685, 10.49990193,
             10.49961675, 10.49933131
         ],
         [
             11.6673944, 11.66691562, 11.66643689, 11.66595821, 11.66547956,
             11.66500096, 11.6645224
         ],
         [
             12.66721189, 12.66673317, 12.66625449, 12.66577585, 12.66529725,
             12.66481869, 12.66434018
         ],
         [
             13.66702939, 13.66655071, 13.66607208, 13.66559348, 13.66511493,
             13.66463643, 13.66415796
         ],
         [
             14.66684688, 14.66636825, 14.66588967, 14.66541112, 14.66493262,
             14.66445416, 14.66397574
         ],
         [
             15.66666438, 15.6661858, 15.66570726, 15.66522876, 15.66475031,
             15.66427189, 15.66379352
         ]])

    # refactored call
    x, y = grid_from_bounding_box(slit.meta.wcs.bounding_box)
    xdx, ydy, _, _, _ = model(x, y, wavelength, np.zeros(x.shape) + 1)
    assert_allclose(xdx, expected_xdx)
    assert_allclose(ydy, expected_ydy)
Example #25
def create_grism_config(conffile="",
                        fname="",
                        pupil="",
                        author="STScI",
                        history="NIRISS Grism Parameters",
                        outname=""):
    """
    pupil is the blocking filter
    filter is the grism

    Create an asdf reference file to hold Grism C (column) or Grism R (row)
    configuration; no sensitivity information is included.

    Note: The orders are named alphabetically, i.e. Order A, Order B.
    There are also sensitivity fits files which are tables of wavelength,
    sensitivity, and error. These are specified in the conffile but will
    not be read in and saved in the output reference file.

    direct_filter is not specified because it assumes that the wedge
    information (wx,wy) is included in the conf file in one of the key-value
    pairs, where the key includes the beam designation

    For each spectral order, the configuration file contains a pair of
    magnitude-cutoff values. Sources with magnitudes fainter than the
    extraction cutoff (MMAG_EXTRACT_X) are not extracted, but are accounted
    for when computing the spectral contamination and background estimates.
    Sources with magnitudes fainter than the second cutoff (MMAG_MARK_X) are
    completely ignored.  Here, X equals A, B, C, etc., with each letter
    referring to a spectral order, as specified in the configuration file.
     -- the initial conf file that Nor gave me didn't have this keyword, so
     this code adds a placeholder.

     This reference file also contains the polynomial model which is appropriate
     for the coefficients which are listed.

    Parameters
    ----------
    conffile : str
        The text file with configuration information
    pupil : str
        Name of the grism the conffile corresponds to
    filter : str
        Name of the filter the conffile corresponds to
    author : str
        The name of the author
    history : str
        A comment about the reference file to be saved with the meta information
    outname : str
        Output name for the reference file


    Returns
    -------
    fasdf : asdf.AsdfFile(jwst.datamodels.NIRISSGrismModel)
    """

    if not history:
        history = "Created from {0:s}".format(conffile)

    # if pupil is none get from filename like NIRCAM_modB_R.conf
    if not fname:
        fname = conffile.split(".")[0]
    if not pupil:
        pupil = conffile.split(".")[1]

    ref_kw = common_reference_file_keywords(reftype="specwcs",
                description="{0:s} dispersion model parameters".format(pupil),
                exp_type="NIS_WFSS",
                model_type='NIRISSGrismModel',
                pupil=pupil,
                filtername=fname,
                history=history,
                author=author,
                filename=outname,
                )

    # get all the key-value pairs from the input file
    conf = dict_from_file(conffile)
    beamdict = split_order_info(conf)
    letter = re.compile("^[a-zA-Z0-9]{0,1}$")  # match one only
    etoken = re.compile("^BEAM_[A-Z,a-z]{1,1}")  # find beam key

    # add min and max mag info if not provided
    # also make beam coeff lists
    # wx are the wedge offsets for the filters
    # in niriss there's a different grism file for each filter

    # for k, bdict in beamdict.items():
    #     if isinstance(bdict, dict):
    #         keys = bdict.keys()
    #         minmag = "MMAG_EXTRACT"
            # maxmag = "MMAG_MARK"
            # if minmag not in keys:
            #     beamdict[k][minmag] = 99.
            # if maxmag not in keys:
            #    beamdict[k][maxmag] = 0.0
            # if "wx" not in keys:
            #    beamdict[k]['wx'] = 0.0
            # if "wy" not in keys:
            #    beamdict[k]['wy'] = 0.0

    # add to the big tree
    # tree['spectral_orders'] = beamdict

    # add the polynomial model for this file.
    # this structure allows there to be a different polynomial relationship
    # for each order if necessary. Either way, the coefficients should be
    # stored with the polynomials since they are directly dependent on
    # each other
    # for order in tree['spectral_orders']:
    #     print("order: {}".format(order))
    #     xc = tree['spectral_orders'][order]["DISPX"]
    #     yc = tree['spectral_orders'][order]["DISPY"]
    #     lc = tree['spectral_orders'][order]["DISPL"]
    #     print("{} {} {}".format(xc, yc, lc))
    #     model = models.PolyTraceDispersion(xc, yc, lc, w)
    #     tree['spectral_orders'][order]['model'] = model

    # The lists below need
    # to remain ordered and referenced by filter or order
    orders = sorted(beamdict.keys())

    # disp[] per sorted order
    displ = []
    dispx = []
    dispy = []
    invdispl = []

    for order in orders:
        # convert the displ wavelengths to microns
        l0 = beamdict[order]['DISPL'][0] / 10000.
        l1 = beamdict[order]['DISPL'][1] / 10000.
        # create polynomials for the coefficients of each order
        invdispl.append(Polynomial1D(1, c0=-l0/l1, c1=1./l1))
        displ.append(Polynomial1D(1, c0=l0, c1=l1))

        # the dispxy functions here are pulled into a 1D
        # such that the final poly is ans = x_model + t*y_model

        e0, e1 = beamdict[order]['DISPX']
        model_x = Polynomial2D(2, c0_0=e0[0], c1_0=e0[1], c2_0=e0[4],
                               c0_1=e0[2], c1_1=e0[5], c0_2=e0[3])
        model_y = Polynomial2D(2, c0_0=e1[0], c1_0=e1[1], c2_0=e1[4],
                               c0_1=e1[2], c1_1=e1[5], c0_2=e1[3])
        dispx.append((model_x, model_y))

        e0, e1 = beamdict[order]['DISPY']
        model_x = Polynomial2D(2, c0_0=e0[0], c1_0=e0[1], c2_0=e0[4],
                               c0_1=e0[2], c1_1=e0[5], c0_2=e0[3])
        model_y = Polynomial2D(2, c0_0=e1[0], c1_0=e1[1], c2_0=e1[4],
                               c0_1=e1[2], c1_1=e1[5], c0_2=e1[3])
        dispy.append((model_x, model_y))
        # disp is x_model + t*y_model
        # invdisp is (t - model_x) / model_y

    # change the orders into translatable integer strings
    # the conf file niriss is giving me are using letter designations
    beam_lookup = {"A": "+1", "B": "0", "C": "+2", "D": "+3", "E": "-1"}
    ordermap = [int(beam_lookup[order]) for order in orders]

    # save the reference file
    ref = NIRISSGrismModel()
    ref.meta.update(ref_kw)
    ref.meta.input_units = u.micron
    ref.meta.output_units = u.micron
    ref.dispx = dispx
    ref.dispy = dispy
    ref.displ = displ
    ref.invdispl = invdispl
    ref.fwcpos_ref = conf['FWCPOS_REF']
    ref.orders = ordermap
    entry = HistoryEntry({'description': history, 'time': datetime.datetime.utcnow()})
    sdict = Software({'name': 'niriss_reftools.py',
                      'author': author,
                      'homepage': 'https://github.com/spacetelescope/jwreftools',
                      'version': '0.7.1'})
    entry['software'] = sdict
    ref.history['entries'] = [entry]
    ref.to_asdf(outname)
    ref.validate()
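A quick check (a standalone sketch with example values) that the DISPL polynomial and its inverse constructed above really undo each other:

import numpy as np
from astropy.modeling.models import Polynomial1D

l0, l1 = 0.75, 1.55                      # example zero point and slope (microns)
displ = Polynomial1D(1, c0=l0, c1=l1)
invdispl = Polynomial1D(1, c0=-l0 / l1, c1=1. / l1)

t = np.linspace(0., 1., 5)
assert np.allclose(invdispl(displ(t)), t)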
Example #26

def test_invalid_operands():
    """
    Test that certain operators do not work with models whose inputs/outputs do
    not match up correctly.
    """

    with pytest.raises(ModelDefinitionError):
        Rotation2D(90) | Gaussian1D(1, 0, 0.1)

    with pytest.raises(ModelDefinitionError):
        Rotation2D(90) + Gaussian1D(1, 0, 0.1)


@pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2)])
def test_compound_with_polynomials_2d(poly):
    """
    Tests that polynomials are scaled when used in compound models.
    Issue #3699
    """
    poly.parameters = [1, 2, 3, 4, 1, 2]
    shift = Shift(3)
    model = poly | shift
    x, y = np.mgrid[:20, :37]
    result_compound = model(x, y)
    result = shift(poly(x, y))
    assert_allclose(result, result_compound)


Example #27
def test_NIRISSForwardColumnGrismDispersion():
    ymodels = [[
        Polynomial2D(2,
                     c0_0=-1.876215,
                     c1_0=-5.179793e-04,
                     c2_0=2.116366e-08,
                     c0_1=-2.259297e-04,
                     c0_2=-2.502127e-12,
                     c1_1=4.771951e-08),
        Polynomial2D(2,
                     c0_0=-3.089115,
                     c1_0=3.063270e-03,
                     c2_0=-9.786785e-07,
                     c0_1=1.237905e-03,
                     c0_2=-1.510774e-11,
                     c1_1=-5.405480e-09)
    ]]

    xmodels = [[
        Polynomial2D(2,
                     c0_0=63.55173,
                     c1_0=3.846599e-06,
                     c2_0=-7.173816e-10,
                     c0_1=8.158127e-07,
                     c0_2=-1.274281e-09,
                     c1_1=4.098804e-11),
        Polynomial2D(2,
                     c0_0=-331.8532,
                     c1_0=-1.24494e-05,
                     c2_0=4.210112e-10,
                     c0_1=-1.615311e-06,
                     c0_2=6.665276e-09,
                     c1_1=1.43762e-10)
    ]]

    lmodels = [
        Polynomial1D(1, c0=0.75, c1=1.55),
        Polynomial1D(1, c0=0.75, c1=1.55)
    ]

    model = transforms.NIRISSForwardColumnGrismDispersion([1, 2, 3, -1],
                                                          lmodels=lmodels,
                                                          xmodels=xmodels,
                                                          ymodels=ymodels,
                                                          theta=33.5677)

    x0 = 913.7
    y0 = 15.5
    order = 1

    slit = create_slit(model, x0, y0, order)
    slit.meta.wcs.bounding_box = ((910, 916), (12, 18))

    expected = np.array([[
        1.05844596, 1.05844596, 1.05844596, 1.05844596, 1.05844596, 1.05844596,
        1.05844596
    ],
                         [
                             1.0500404, 1.0500404, 1.0500404, 1.0500404,
                             1.0500404, 1.0500404, 1.0500404
                         ],
                         [
                             1.04163483, 1.04163483, 1.04163483, 1.04163483,
                             1.04163483, 1.04163483, 1.04163483
                         ],
                         [
                             1.03322927, 1.03322927, 1.03322927, 1.03322927,
                             1.03322927, 1.03322927, 1.03322927
                         ],
                         [
                             1.02482371, 1.02482371, 1.02482371, 1.02482371,
                             1.02482371, 1.02482371, 1.02482371
                         ],
                         [
                             1.01641815, 1.01641815, 1.01641815, 1.01641815,
                             1.01641815, 1.01641815, 1.01641815
                         ],
                         [
                             1.00801258, 1.00801258, 1.00801258, 1.00801258,
                             1.00801258, 1.00801258, 1.00801258
                         ]])

    # refactored call
    x, y = grid_from_bounding_box(slit.meta.wcs.bounding_box)
    wavelength = compute_wavelength_array(slit)
    assert_allclose(wavelength, expected)
Example #28
def create_nircam_distortion(detector, aperture, outname, sci_pupil,
                             sci_subarr, sci_exptype, history_entry):
    """
    Create an asdf reference file with all distortion components for the NIRCam imager.

    NOTE: The IDT has not provided any distortion information. The files are constructed
    using ISIM transformations provided/(computed?) by the TEL team which they use to
    create the SIAF file.
    These reference files should be replaced when/if the IDT provides us with distortion.

    Parameters
    ----------
    detector : str
        NRCB1, NRCB2, NRCB3, NRCB4, NRCB5, NRCA1, NRCA2, NRCA3, NRCA4, NRCA5
    aperture : str
        Name of the aperture/subarray. (e.g. FULL, SUB160, SUB320, SUB640, GRISM_F322W2)
    outname : str
        Name of output file.

    Examples
    --------

    """
    degree = 5  # distortion in pysiaf is a 5th order polynomial
    numdet = detector[-1]
    module = detector[-2]
    channel = 'SHORT'
    if numdet == '5':
        channel = 'LONG'

    full_aperture = detector + '_' + aperture

    # Get Siaf instance for detector/aperture
    inst_siaf = pysiaf.Siaf('nircam')
    siaf = inst_siaf[full_aperture]

    # Find the distance between (0,0) and the reference location
    xshift, yshift = get_refpix(inst_siaf, full_aperture)

    # *****************************************************
    # "Forward' transformations. science --> ideal --> V2V3
    label = 'Sci2Idl'
    #from_units = 'distorted pixels'
    #to_units = 'arcsec'

    xcoeffs, ycoeffs = get_distortion_coeffs(label, siaf)

    sci2idlx = Polynomial2D(degree, **xcoeffs)
    sci2idly = Polynomial2D(degree, **ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    parity = siaf.VIdlParity
    v3_ideal_y_angle = siaf.V3IdlYAngle * np.pi / 180.
    idl2v2v3x, idl2v2v3y = v2v3_model('ideal', 'v2v3', parity, v3_ideal_y_angle)

    # Finally, we need to shift by the v2,v3 value of the reference
    # location in order to get to absolute v2,v3 coordinates
    v2shift, v3shift = get_v2v3ref(siaf)

    # *****************************************************
    # 'Reverse' transformations. V2V3 --> ideal --> science
    label = 'Idl2Sci'
    #from_units = 'arcsec'
    #to_units = 'distorted pixels'

    xcoeffs, ycoeffs = get_distortion_coeffs(label, siaf)

    idl2scix = Polynomial2D(degree, **xcoeffs)
    idl2sciy = Polynomial2D(degree, **ycoeffs)

    # Get info for ideal -> v2v3 or v2v3 -> ideal model
    parity = siaf.VIdlParity
    v3_ideal_y_angle = siaf.V3IdlYAngle * np.pi / 180.
    v2v32idlx, v2v32idly = v2v3_model('v2v3', 'ideal', parity, v3_ideal_y_angle)

    ##"Forward' transformations. science --> ideal --> V2V3
    #sci2idlx, sci2idly, sciunit, idlunit = read_siaf_table.get_siaf_transform(coefffile,full_aperture,'science','ideal', 5)
    #idl2v2v3x, idl2v2v3y = read_siaf_table.get_siaf_v2v3_transform(coefffile,full_aperture,from_system='ideal')

    ##'Reverse' transformations. V2V3 --> ideal --> science
    #v2v32idlx, v2v32idly = read_siaf_table.get_siaf_v2v3_transform(coefffile,full_aperture,to_system='ideal')
    #idl2scix, idl2sciy, idlunit, sciunit = read_siaf_table.get_siaf_transform(coefffile,full_aperture,'ideal','science', 5)

    # Now create a compound model for each with the appropriate inverse
    sci2idl = Mapping([0, 1, 0, 1]) | sci2idlx & sci2idly
    sci2idl.inverse = Mapping([0, 1, 0, 1]) | idl2scix & idl2sciy

    idl2v2v3 = Mapping([0, 1, 0, 1]) | idl2v2v3x & idl2v2v3y
    idl2v2v3.inverse = Mapping([0, 1, 0, 1]) | v2v32idlx & v2v32idly

    # Now string the models together to make a single transformation

    # We also need
    # to account for the difference of 1 between the SIAF
    # coordinate values (indexed to 1) and python (indexed to 0).
    # Nadia said that this shift should be present in the
    # distortion reference file.

    core_model = sci2idl | idl2v2v3

    # Now add in the shifts to create the full model
    # including the shift to go from 0-indexed python coords to
    # 1-indexed

    # SIAF coords
    index_shift = Shift(1)
    model = index_shift & index_shift | xshift & yshift | core_model | v2shift & v3shift

    # Since the inverse of all model components are now defined,
    # the total model inverse is also defined automatically

    # In the reference file headers, we need to switch NRCA5 to
    # NRCALONG, and same for module B.
    if detector[-1] == '5':
        detector = detector[0:4] + 'LONG'

    # Save using the DistortionModel datamodel
    d = DistortionModel(model=model, input_units=u.pix,
                        output_units=u.arcsec)

    #Populate metadata

    # Keyword values in science data to which this file should
    # be applied
    p_pupil = ''
    for p in sci_pupil:
        p_pupil = p_pupil + p + '|'

    p_subarr = ''
    for p in sci_subarr:
        p_subarr = p_subarr + p + '|'

    p_exptype = ''
    for p in sci_exptype:
        p_exptype = p_exptype + p + '|'

    d.meta.instrument.p_pupil = p_pupil
    d.meta.subarray.p_subarray = p_subarr
    d.meta.exposure.p_exptype = p_exptype

    #d.meta.instrument.p_pupil = "CLEAR|F162M|F164N|F323N|F405N|F470N|"
    #d.meta.p_subarray = "FULL|SUB64P|SUB160|SUB160P|SUB320|SUB400P|SUB640|SUB32TATS|SUB32TATSGRISM|SUB8FP1A|SUB8FP1B|SUB96DHSPILA|SUB96DHSPILB|SUB64FP1A|SUB64FP1B|"
    #d.meta.exposure.p_exptype = "NRC_IMAGE|NRC_TSIMAGE|NRC_FLAT|NRC_LED|NRC_WFSC|"

    # metadata describing the reference file itself
    d.meta.title = "NIRCam Distortion"
    d.meta.instrument.name = "NIRCAM"
    d.meta.instrument.module = module
    d.meta.instrument.channel = channel
    d.meta.instrument.detector = detector
    d.meta.telescope = 'JWST'
    d.meta.subarray.name = 'FULL'
    d.meta.pedigree = 'GROUND'
    d.meta.reftype = 'DISTORTION'
    d.meta.author = 'B. Hilbert'
    d.meta.litref = "https://github.com/spacetelescope/jwreftools"
    d.meta.description = "Distortion model from SIAF coefficients in pysiaf version 0.6.1"
    #d.meta.exp_type = exp_type
    d.meta.useafter = "2014-10-01T00:00:00"

    # To be ready for the future where we will have filter-dependent solutions
    d.meta.instrument.filter = 'N/A'

    # Create initial HISTORY ENTRY
    sdict = {'name': 'nircam_distortion_reffiles_from_pysiaf.py',
             'author': 'B.Hilbert',
             'homepage': 'https://github.com/spacetelescope/jwreftools',
             'version': '0.8'}

    entry = util.create_history_entry(history_entry, software=sdict)
    d.history = [entry]

    #Create additional HISTORY entries
    #entry2 = util.create_history_entry(history_2)
    #d.history.append(entry2)

    d.save(outname)
    print("Output saved to {}".format(outname))