Code Example #1
def ifu_slicer2asdf(ifuslicer, outname):
    """
    Create an asdf reference file with the MSA description.

    ifu_slicer2asdf("IFU_slicer.sgd", "ifu_slicer.asdf")

    Parameters
    ----------
    ifuslicer : str
        A fits file with the IFU slicer description
    outname : str
        Name of output ASDF file.
    """
    ref_kw = common_reference_file_keywords("IFUSLICER", "NIRSPEC IFU SLICER description - CDP4")
    f = fits.open(ifuslicer)
    tree = ref_kw.copy()
    data = f[1].data
    header = f[1].header
    shiftx = models.Shift(header['XREF'], name='ifu_slicer_xref')
    shifty = models.Shift(header['YREF'], name='ifu_slicer_yref')
    rot = models.Rotation2D(header['ROT'], name='ifu_slicer_rot')
    model = rot | shiftx & shifty
    tree['model'] = model
    tree['data'] = f[1].data
    f.close()
    fasdf = AsdfFile()
    fasdf.tree = tree
    fasdf.write_to(outname)
    return fasdf
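
A file produced this way can be read back with the public asdf.open entry point; a minimal, hypothetical consumer sketch (the file name comes from the docstring example, the coordinates are illustrative):

import asdf

with asdf.open("ifu_slicer.asdf") as af:
    transform = af.tree['model']      # Rotation2D | Shift & Shift
    x, y = transform(0.0, 0.0)        # rotate first, then shift each axis
    slicer_table = af.tree['data']    # the stored FITS table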
Code Example #2
File: validate.py Project: STScI-JWST/jwst
def _check_value(value, schema):
    """
    Perform the actual validation.
    """
    if value is None:
        if schema.get('fits_required'):
            name = schema.get("fits_keyword") or schema.get("fits_hdu")
            raise jsonschema.ValidationError("%s is a required value"
                                              % name)
    else:
        validator_context = AsdfFile()
        validator_resolver = validator_context.resolver

        temp_schema = {
            '$schema':
            'http://stsci.edu/schemas/asdf-schema/0.1.0/asdf-schema'}
        temp_schema.update(schema)
        validator = asdf_schema.get_validator(temp_schema,
                                              validator_context,
                                              validator_callbacks,
                                              validator_resolver)

        value = yamlutil.custom_tree_to_tagged_tree(value, validator_context)
        validator.validate(value, _schema=temp_schema)
        validator_context.close()
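
A hypothetical call exercising the fits_required branch above (the schema dict is illustrative, not taken from a real datamodels schema):

import jsonschema

schema = {'fits_keyword': 'EXP_TYPE', 'fits_required': True}
try:
    _check_value(None, schema)
except jsonschema.ValidationError as err:
    print(err.message)  # "EXP_TYPE is a required value"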
Code Example #3
File: miri.py Project: nden/jwst
def alpha_beta2XanYan(input_model, reference_files):
    """
    Create the transform from detector to Xan, Yan frame.

    forward transform:
      RegionsSelector
        label_mapper is LabelMapperDict()
        {channel_wave_range (): channel_number}
        selector is {channel_number: ab2Xan & ab2Yan}
    backward transform:
      RegionsSelector
        label_mapper is LabelMapperDict()
        {channel_wave_range (): channel_number}
        selector is {channel_number: Xan2ab & Yan2ab}
    """
    band = input_model.meta.instrument.band
    channel = input_model.meta.instrument.channel
    # used to read the wavelength range
    channels = [c + band for c in channel]

    f = AsdfFile.open(reference_files['v2v3'])
    v23 = f.tree['model']
    f.close()
    f = AsdfFile.open(reference_files['wavelengthrange'])
    # the following should go in the asdf reader
    wave_range = f.tree['wavelengthrange'].copy()
    wave_channels = f.tree['channels']
    wr = {}
    for ch, r in zip(wave_channels, wave_range):
        wr[ch] = r
    f.close()

    dict_mapper = {}
    sel = {}
    for c in channels:
        ch = int(c[0])
        dict_mapper[tuple(wr[c])] = models.Mapping((2,), name="mapping_lam") | \
                   models.Const1D(ch, name="channel #")
        map1 = models.Mapping((1, 0, 1, 0), name='map2poly')
        map1._outputs = ('alpha', 'beta', 'alpha', 'beta')
        map1._inputs = ('alpha', 'beta')
        map1.inverse = models.Mapping((0, 1))
        ident1 = models.Identity(1, name='identity_lam')
        ident1._inputs = ('lam',)
        chan_v23 = v23[c]
        v23chan_backward = chan_v23.inverse
        del chan_v23.inverse
        v23_spatial = map1 | chan_v23
        v23_spatial.inverse = map1 | v23chan_backward
        v23c = v23_spatial & ident1
        sel[ch] = v23c

    wave_range_mapper = selector.LabelMapperRange(('alpha', 'beta', 'lam'), dict_mapper,
                                                  inputs_mapping=models.Mapping([2,]))
    wave_range_mapper.inverse = wave_range_mapper.copy()
    ab2xyan = selector.RegionsSelector(('alpha', 'beta', 'lam'), ('v2', 'v3', 'lam'),
                                      label_mapper=wave_range_mapper,
                                      selector=sel)

    return ab2xyan
Code Example #4
File: nirspec.py Project: philhodge/jwst
def detector_to_gwa(reference_files, detector, disperser):
    """
    Transform from DETECTOR frame to GWA frame.

    Parameters
    ----------
    reference_files: dict
        Dictionary with reference files returned by CRDS.
    detector : str
        The detector keyword.
    disperser : dict
        A corrected disperser ASDF object.

    Returns
    -------
    model : `~astropy.modeling.core.Model` model.
        Transform from DETECTOR frame to GWA frame.

    """
    with AsdfFile.open(reference_files['fpa']) as f:
        fpa = f.tree[detector].copy()
    with AsdfFile.open(reference_files['camera']) as f:
        camera = f.tree['model'].copy()

    angles = [disperser['theta_x'], disperser['theta_y'],
               disperser['theta_z'], disperser['tilt_y']]
    rotation = Rotation3DToGWA(angles, axes_order="xyzy", name='rotation')
    u2dircos = Unitless2DirCos(name='unitless2directional_cosines')
    model = (models.Shift(-1) & models.Shift(-1) | fpa | camera | u2dircos | rotation)
    return model
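
The returned model is built with astropy.modeling's compound operators: & runs models side by side on separate inputs, while | pipes the outputs of one stage into the next. A minimal standalone sketch (values are illustrative):

from astropy.modeling import models

to_zero_based = models.Shift(-1) & models.Shift(-1)    # 2 inputs -> 2 outputs
demo = to_zero_based | models.Scale(2.0) & models.Scale(2.0)
print(demo(1.0, 1.0))  # (0.0, 0.0)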
Code Example #5
File: mask.py Project: SKIRT/PTS
    def saveto_asdf(self, path, header=None, update_path=True):

        """
        This function ...
        :param path:
        :param header:
        :param update_path:
        :return:
        """

        # If a header is not specified, create it from the WCS
        if header is None: header = self.header

        # Import
        from asdf import AsdfFile

        # Create the tree
        tree = dict()

        tree["data"] = self._data
        tree["header"] = header

        # Create the asdf file
        ff = AsdfFile(tree)

        # Write
        ff.write_to(path)

        # Update the path
        if update_path: self.path = path
Code Example #6
File: test_wcs.py Project: vmarkovtsev/asdf
def test_backwards_compat_gcrs():
    obsgeoloc = (
        3.0856775814671916e+16,
        9.257032744401574e+16,
        6.1713551629343834e+19
    )
    obsgeovel = (2.0, 1.0, 8.0)

    old_frame_yaml =  """
frames:
  - !wcs/celestial_frame-1.0.0
    axes_names: [lon, lat]
    name: CelestialFrame
    reference_frame:
      type: GCRS
      obsgeoloc:
        - [%f, %f, %f]
        - !unit/unit-1.0.0 m
      obsgeovel:
        - [%f, %f, %f]
        - !unit/unit-1.0.0 m s-1
      obstime: !time/time-1.0.0 2010-01-01 00:00:00.000
    unit: [!unit/unit-1.0.0 deg, !unit/unit-1.0.0 deg]
""" % (obsgeovel + obsgeoloc)

    new_frame_yaml = """
frames:
  - !wcs/celestial_frame-1.1.0
    axes_names: [lon, lat]
    name: CelestialFrame
    reference_frame:
      type: GCRS
      obsgeoloc:
      - !unit/quantity-1.1.0 {unit: !unit/unit-1.0.0 m, value: %f}
      - !unit/quantity-1.1.0 {unit: !unit/unit-1.0.0 m, value: %f}
      - !unit/quantity-1.1.0 {unit: !unit/unit-1.0.0 m, value: %f}
      obsgeovel:
      - !unit/quantity-1.1.0 {unit: !unit/unit-1.0.0 m s-1, value: %f}
      - !unit/quantity-1.1.0 {unit: !unit/unit-1.0.0 m s-1, value: %f}
      - !unit/quantity-1.1.0 {unit: !unit/unit-1.0.0 m s-1, value: %f}
      obstime: !time/time-1.1.0 2010-01-01 00:00:00.000
    unit: [!unit/unit-1.0.0 deg, !unit/unit-1.0.0 deg]
""" % (obsgeovel + obsgeoloc)

    old_buff = helpers.yaml_to_asdf(old_frame_yaml)
    old_asdf = AsdfFile.open(old_buff)
    old_frame = old_asdf.tree['frames'][0]
    old_loc = old_frame.reference_frame.obsgeoloc
    old_vel = old_frame.reference_frame.obsgeovel

    new_buff = helpers.yaml_to_asdf(new_frame_yaml)
    new_asdf = AsdfFile.open(new_buff)
    new_frame = new_asdf.tree['frames'][0]
    new_loc = new_frame.reference_frame.obsgeoloc
    new_vel = new_frame.reference_frame.obsgeovel

    assert (old_loc.x == new_loc.x and old_loc.y == new_loc.y and
        old_loc.z == new_loc.z)
    assert (old_vel.x == new_vel.x and old_vel.y == new_vel.y and
        old_vel.z == new_vel.z)
Code Example #7
def prism2asdf(prifile, tiltyfile, tiltxfile, outname):
    """Create a NIRSPEC prism disperser reference file in ASDF format.

    Combine information stored in disperser_G?.dis and disperser_G?_TiltY.gtp
    files delivered by the IDT.


    Parameters
    ----------
    prifile : list or str
        File with primary information for the PRISM.
    tiltyfile : str
        File with tilt_Y data, e.g. disperser_PRISM_TiltY.gtp.
    tiltxfile : str
        File with tilt_x data, e.g. disperser_PRISM_TiltX.gtp.
    outname : str
        Name of output ASDF file.

    Returns
    -------
    fasdf : asdf.AsdfFile

    """

    params = common_reference_file_keywords("PRISM", "NIRSPEC PRISM Model")
    flist = [prifile, tiltyfile, tiltxfile]

    # translate the files
    for fname in flist:
        try:
            refparams = dict_from_file(fname)
        except Exception:
            print("Disperser file was not converted.")
            raise

        pdict = {}
        coeffs = {}
        parts = fname.lower().split(".")[0]
        ref = str("_".join(parts.split("_")[1:]))

        if "pri" not in fname:
            try:
                for i, c in enumerate(refparams['CoeffsTemperature00']):
                    coeffs['c' + str(i)] = c
                pdict['tilt_model'] = models.Polynomial1D(len(coeffs)-1, **coeffs)
                del refparams['CoeffsTemperature00']
            except KeyError:
                print("Missing CoeffsTemperature in {0}".format(fname))
                raise

        # store the rest of the keys
        for k, v in refparams.items():
            pdict[k] = v
        print(pdict)
        params[ref] = pdict

    fasdf = AsdfFile()
    fasdf.tree = params
    fasdf.write_to(outname)
    return fasdf
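
The tilt model above is a Polynomial1D assembled from a coefficient list; a standalone sketch of that construction (coefficients are illustrative):

from astropy.modeling import models

coeffs_list = [0.1, -0.02, 0.003]    # e.g. a CoeffsTemperature00 entry
coeffs = {'c' + str(i): c for i, c in enumerate(coeffs_list)}
tilt_model = models.Polynomial1D(len(coeffs) - 1, **coeffs)
print(tilt_model(5.0))  # 0.1 - 0.02*5 + 0.003*25 = 0.075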
Code Example #8
File: nirspec.py Project: hbushouse/jwst
def ifuslit_to_msa(slits, reference_files):
    """
    The transform from slit_frame to msa_frame.

    Parameters
    ----------
    slits : list
        A list of slit IDs for all IFU slits.
    reference_files : dict
        Dictionary with reference files returned by CRDS.

    Returns
    -------
    model : `~jwst_lib.pipeline_models.Slit2Msa` model.
        Transform from slit_frame to msa_frame.
    """
    with AsdfFile.open(reference_files['ifufore']) as f:
        ifufore = f.tree['model']

    ifuslicer = AsdfFile.open(reference_files['ifuslicer'])
    models = {}
    ifuslicer_model = (ifuslicer.tree['model']).rename('ifuslicer_model')
    for slit in slits:
        slitdata = ifuslicer.tree['data'][slit]
        slitdata_model = (get_slit_location_model(slitdata)).rename('slitdata_model')
        msa_transform = slitdata_model | ifuslicer_model
        models[slit] = msa_transform
    ifuslicer.close()

    return Slit2Msa(models)
Code Example #9
def calc_cube(numb, fried_parameter = 4, time_between = 0.7):
    filepath = os.getcwd().split("vApp_reduction",1)[0]+"vApp_reduction/data/psf_cube_cache/"
    filepath += "psf_cube_"+str(float(fried_parameter))+"_"+str(float(time_between))+"_"+str(int(numb))+".asdf"

    # expand to only demand numb <= numb on disk
    if os.path.exists(filepath):

        tree = AsdfFile.open(filepath, copy_arrays=True).tree
        tree_keys = tree.keys()
        psf_params = sorted(list(filter(lambda key: isinstance(key, float), tree_keys)))
        psf_cube = list(map(lambda param: np.copy(tree[param]), psf_params))
        return psf_cube, psf_params
    else:
        path =  os.getcwd()+"/code/psf/generate_vAPP_cube.py"
        params = [str(fried_parameter), str(time_between), str(numb)]
        cmd = [sys.executable, path, *params]
        print("please run: ")
        print(' '.join(cmd))
        input("Press Enter to continue...")

        tree = AsdfFile.open(filepath).tree
        tree_keys = tree.keys()
        psf_params = sorted(list(filter(lambda key: isinstance(key, float), tree_keys)))
        psf_cube = list(map(lambda param: tree[param], psf_params))

        return psf_cube, psf_params
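
AsdfFile.open(filepath).tree above never closes the underlying file handle. A sketch of the same read with deterministic cleanup, assuming the modern asdf.open entry point and its copy_arrays flag:

import numpy as np
import asdf

def load_psf_cube(filepath):
    # copy_arrays=True materializes the blocks in memory, so the arrays
    # stay valid after the file is closed
    with asdf.open(filepath, copy_arrays=True) as af:
        psf_params = sorted(k for k in af.tree if isinstance(k, float))
        psf_cube = [np.copy(af.tree[p]) for p in psf_params]
    return psf_cube, psf_params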
Code Example #10
File: nirspec.py Project: philhodge/jwst
def ifu_msa_to_oteip(reference_files):
    """
    Transform from the MSA frame to the OTEIP frame.

    Parameters
    ----------
    reference_files: dict
        Dictionary with reference files returned by CRDS.

    Returns
    -------
    model : `~astropy.modeling.core.Model` model.
        Transform from MSA to OTEIP.
    """
    with AsdfFile.open(reference_files['fore']) as f:
        fore = f.tree['model'].copy()
    with AsdfFile.open(reference_files['ifufore']) as f:
        ifufore = f.tree['model'].copy()

    msa2fore_mapping = Mapping((0, 1, 2, 2))
    msa2fore_mapping.inverse = Identity(3)
    ifu_fore_transform = ifufore & Identity(1)
    ifu_fore_transform.inverse = Mapping((0, 1, 2, 2)) | ifufore.inverse & Identity(1)
    fore_transform = msa2fore_mapping | fore & Identity(1)
    return msa2fore_mapping | ifu_fore_transform | fore_transform
Code Example #11
File: miri.py Project: nden/jwst
def imaging_distortion(input_model, reference_files):
    """
    Create the pixel2sky and sky2pixel transformations for the MIRI imager.

    Parameters
    ----------
    input_model : jwst_lib.models.ImagingModel
        The input data model.
    reference_files : dict
        reference files from CRDS


    using CDP 3 Reference distortion file
    MIRI_FM_MIRIMAGE_F1000W_PSF_03.01.00.fits

    reference files/corrections needed (pixel to sky):

    1. Filter-dependent shift in (x,y) (with the opposite sign to that delivered by the IDT)
    2. Apply MI
    3. Apply Ai and BI matrices
    4. Apply the TI matrix (this gives V2/V3 coordinates)
    5. Apply V2/V3 to sky transformation

    ref_file: filter_offset.asdf - (1)
    ref_file: distortion.asdf -(2,3,4)
    """
    distortion = AsdfFile.open(reference_files['distortion']).tree['model']
    filter_offset = AsdfFile.open(reference_files['filteroffset']).tree[input_model.meta.instrument.filter]
    full_distortion = models.Shift(filter_offset['row_offset']) & models.Shift(
        filter_offset['column_offset']) | distortion
    full_distortion = full_distortion.rename('distortion')
    return full_distortion
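
A reduced sketch of the composition pattern in the return value: two shifts applied to (x, y), piped into a stand-in distortion model (here a no-op rotation; the real transform comes from distortion.asdf, and the offsets are illustrative):

from astropy.modeling import models

row_offset, column_offset = 0.5, -0.25       # illustrative values
distortion_stub = models.Rotation2D(0.0)     # stand-in for the CRDS model
full = models.Shift(row_offset) & models.Shift(column_offset) | distortion_stub
print(full(10.0, 20.0))  # (10.5, 19.75)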
Code Example #12
def ifupost2asdf(ifupost_files, outname):
    """
    Create a reference file of type ``ifupost`` .

    Combines all IDT ``IFU-POST`` reference files in one ASDF file.

    forward direction : MSA to Collimator
    backward_direction: Collimator to MSA

    Parameters
    ----------
    ifupost_files : list
        Names of all ``IFU-POST`` IDT reference files
    outname : str
        Name of output ``ASDF`` file
    """
    ref_kw = common_reference_file_keywords("IFUPOST", "NIRSPEC IFU-POST transforms - CDP4")
    fa = AsdfFile()
    fa.tree = ref_kw
    for fifu in ifupost_files:
        n = int((fifu.split('IFU-POST_')[1]).split('.pcf')[0])
        fa.tree[n] = {}
        with open(fifu) as f:
            lines = [l.strip() for l in f.readlines()]
        factors = lines[lines.index('*Factor 2') + 1].split()
        rotation_angle = float(lines[lines.index('*Rotation') + 1])
        input_rot_center = lines[lines.index('*InputRotationCentre 2') + 1].split()
        output_rot_center = lines[lines.index('*OutputRotationCentre 2') + 1].split()
        linear_sky2det = homothetic_sky2det(input_rot_center, rotation_angle, factors, output_rot_center)

        degree = int(lines[lines.index('*FitOrder') + 1])

        xcoeff_index = lines.index('*xForwardCoefficients 21 2')
        xlines = lines[xcoeff_index + 1: xcoeff_index + 22]
        xcoeff_forward = coeffs_from_pcf(degree, xlines)
        x_poly_forward = models.Polynomial2D(degree, name='x_poly_forward', **xcoeff_forward)

        ycoeff_index = lines.index('*yForwardCoefficients 21 2')
        ycoeff_forward = coeffs_from_pcf(degree, lines[ycoeff_index + 1: ycoeff_index + 22])
        y_poly_forward = models.Polynomial2D(degree, name='y_poly_forward', **ycoeff_forward)

        xcoeff_index = lines.index('*xBackwardCoefficients 21 2')
        xcoeff_backward = coeffs_from_pcf(degree, lines[xcoeff_index + 1: xcoeff_index + 22])
        x_poly_backward = models.Polynomial2D(degree, name='x_poly_backward', **xcoeff_backward)

        ycoeff_index = lines.index('*yBackwardCoefficients 21 2')
        ycoeff_backward = coeffs_from_pcf(degree, lines[ycoeff_index + 1: ycoeff_index + 22])
        y_poly_backward = models.Polynomial2D(degree, name='y_poly_backward', **ycoeff_backward)

        output2poly_mapping = Identity(2, name='output_mapping')
        output2poly_mapping.inverse = Mapping([0, 1, 0, 1])
        input2poly_mapping = Mapping([0, 1, 0, 1], name='input_mapping')
        input2poly_mapping.inverse = Identity(2)

        model_poly = input2poly_mapping | (x_poly_forward & y_poly_forward) | output2poly_mapping

        model = linear_sky2det | model_poly
        fa.tree[n]['model'] = model
    asdffile = fa.write_to(outname)
    return asdffile
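
The input2poly_mapping/output2poly_mapping pairing above is an idiom for evaluating two 2D polynomials in parallel over the same (x, y) pair while keeping the compound model invertible. A minimal sketch with trivial polynomials (coefficients are illustrative):

from astropy.modeling import models
from astropy.modeling.models import Mapping

x_poly = models.Polynomial2D(1, c0_0=0, c1_0=1, c0_1=0)    # returns x
y_poly = models.Polynomial2D(1, c0_0=0, c1_0=0, c0_1=1)    # returns y

# Duplicate (x, y) -> (x, y, x, y) so each polynomial sees both inputs.
forward = Mapping([0, 1, 0, 1]) | (x_poly & y_poly)
print(forward(3.0, 4.0))  # (3.0, 4.0)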
Code Example #13
def pcf_forward(pcffile, outname):
    """
    Create the **IDT** forward transform from collimator to gwa.
    """
    with open(pcffile) as f:
        lines = [l.strip() for l in f.readlines()]

    factors = lines[lines.index('*Factor 2') + 1].split()
    # The scale factor is inverted (1/factor) in the backward msa2ote direction and used as-is in the sky2detector direction
    scale = models.Scale(float(factors[0]), name="x_scale") & \
          models.Scale(float(factors[1]), name="y_scale")

    rotation_angle = lines[lines.index('*Rotation') + 1]
    # The minus sign here is because astropy.modeling rotates in the opposite direction from the IDL implementation
    rotation = models.Rotation2D(-float(rotation_angle), name='rotation')


    # Here the model is called "output_shift" but in the team version it is the "input_shift".
    input_rot_center = lines[lines.index('*InputRotationCentre 2') + 1].split()
    input_rot_shift = models.Shift(-float(input_rot_center[0]), name='input_x_shift') & \
                 models.Shift(-float(input_rot_center[1]), name='input_y_shift')


    # Here the model is called "input_shift" but in the team version it is the "output_shift".
    output_rot_center = lines[lines.index('*OutputRotationCentre 2') + 1].split()
    output_rot_shift = models.Shift(float(output_rot_center[0]), name='output_x_shift') & \
                  models.Shift(float(output_rot_center[1]), name='output_y_shift')

    degree = int(lines[lines.index('*FitOrder') + 1])
    xcoeff_index = lines.index('*xForwardCoefficients 21 2')
    xlines = lines[xcoeff_index + 1: xcoeff_index + 22]
    xcoeff_forward = coeffs_from_pcf(degree, xlines)
    x_poly_forward = models.Polynomial2D(degree, name='x_poly_forward', **xcoeff_forward)

    ycoeff_index = lines.index('*yForwardCoefficients 21 2')
    ycoeff_forward = coeffs_from_pcf(degree, lines[ycoeff_index + 1: ycoeff_index + 22])
    y_poly_forward = models.Polynomial2D(degree, name='y_poly_forward', **ycoeff_forward)

    xcoeff_index = lines.index('*xBackwardCoefficients 21 2')
    xcoeff_backward = coeffs_from_pcf(degree, lines[xcoeff_index + 1: xcoeff_index + 22])
    x_poly_backward = models.Polynomial2D(degree, name='x_poly_backward', **xcoeff_backward)

    ycoeff_index = lines.index('*yBackwardCoefficients 21 2')
    ycoeff_backward = coeffs_from_pcf(degree, lines[ycoeff_index + 1: ycoeff_index + 22])
    y_poly_backward = models.Polynomial2D(degree, name='y_poly_backward', **ycoeff_backward)

    x_poly_forward.inverse = x_poly_backward
    y_poly_forward.inverse = y_poly_backward

    poly_mapping1  = Mapping((0, 1, 0, 1))
    poly_mapping1.inverse = Identity(2)
    poly_mapping2 = Identity(2)
    poly_mapping2.inverse = Mapping((0, 1, 0, 1))

    model = input_rot_shift | rotation | scale | output_rot_shift | \
          poly_mapping1 | x_poly_forward & y_poly_forward | poly_mapping2
    f = AsdfFile()
    f.tree = {'model': model}
    f.write_to(outname)
Code Example #14
def ote2asdf(otepcf, outname, ref_kw):
    """
    ref_kw = common_reference_file_keywords('OTE', 'NIRSPEC OTE transform - CDP4')

    ote2asdf('Model/Ref_Files/CoordTransform/OTE.pcf', 'jwst_nirspec_ote_0001.asdf', ref_kw)
    """
    with open(otepcf) as f:
        lines = [l.strip() for l in f.readlines()]

    factors = lines[lines.index('*Factor 2 1') + 1].split()
    # this corresponds to modeling Rotation direction as is
    rotation_angle = float(lines[lines.index('*Rotation') + 1])
    input_rot_center = lines[lines.index('*InputRotationCentre 2 1') + 1].split()
    output_rot_center = lines[lines.index('*OutputRotationCentre 2 1') + 1].split()

    mlinear = homothetic_det2sky(input_rot_center, rotation_angle, factors, output_rot_center)

    degree = int(lines[lines.index('*FitOrder') + 1])

    xcoeff_index = lines.index('*xBackwardCoefficients 21 2')
    xlines = lines[xcoeff_index + 1].split('\t')
    xcoeff_backward = coeffs_from_pcf(degree, xlines)
    x_poly_forward = models.Polynomial2D(degree, name='x_poly_forward', **xcoeff_backward)

    xcoeff_index = lines.index('*xForwardCoefficients 21 2')
    xlines = lines[xcoeff_index + 1].split('\t')
    xcoeff_forward = coeffs_from_pcf(degree, xlines)
    x_poly_backward = models.Polynomial2D(degree, name='x_poly_backward', **xcoeff_forward)

    ycoeff_index = lines.index('*yBackwardCoefficients 21 2')
    ylines = lines[ycoeff_index + 1].split('\t')
    ycoeff_backward = coeffs_from_pcf(degree, ylines)
    y_poly_forward = models.Polynomial2D(degree, name='y_poly_forward', **ycoeff_backward)

    ycoeff_index = lines.index('*yForwardCoefficients 21 2')
    ylines = lines[ycoeff_index + 1].split('\t')
    ycoeff_forward = coeffs_from_pcf(degree, ylines)
    y_poly_backward = models.Polynomial2D(degree, name='y_poly_backward', **ycoeff_forward)

    x_poly_forward.inverse = x_poly_backward
    y_poly_forward.inverse = y_poly_backward

    output2poly_mapping = Identity(2, name='output_mapping')
    output2poly_mapping.inverse = Mapping([0, 1, 0, 1])
    input2poly_mapping = Mapping([0, 1, 0, 1], name='input_mapping')
    input2poly_mapping.inverse = Identity(2)

    model_poly = input2poly_mapping | (x_poly_forward & y_poly_forward) | output2poly_mapping

    model = model_poly | mlinear


    f = AsdfFile()
    f.tree = ref_kw.copy()
    f.tree['model'] = model
    f.write_to(outname)
    return model_poly, mlinear
Code Example #15
File: miri_ifu_ref_tools.py Project: hbushouse/jwst
def create_v23(reftype, detector, band, channels, data, name):
    """
    Create the transform from MIRI Local to telescope V2/V3 system for all channels.
    """
    channel = "".join([ch[0] for ch in channels])
    tree = {"detector": detector,
            "instrument" : "MIRI",
            "band": band,
            "channel": channel,
            "exp_type": "MIR_MRS",
            "pedigree": "GROUND",
            "title": "MIRI IFU model - based on CDP-4",
            "reftype": reftype,
            "author": "N. Dencheva"
            }
    ab_v23 = data[0]
    v23_ab = data[1]
    m = {}
    c0_0, c0_1, c1_0, c1_1 = ab_v23[0][1:]
    ch1_v2 = models.Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1,
                                 name="ab_v23")
    c0_0, c0_1, c1_0, c1_1 = v23_ab[0][1:]
    ch1_a = models.Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1,
                                name="v23_ab")

    c0_0, c0_1, c1_0, c1_1 = ab_v23[1][1:]
    ch1_v3 = models.Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1,
                                 name="ab_v23")
    c0_0, c0_1, c1_0, c1_1 = v23_ab[1][1:]
    ch1_b = models.Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1,
                                name="v23_ab")
    c0_0, c0_1, c1_0, c1_1 = ab_v23[2][1:]
    ch2_v2 = models.Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1,
                                 name="ab_v23")
    c0_0, c0_1, c1_0, c1_1 = v23_ab[2][1:]
    ch2_a = models.Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1,
                                name="v23_ab")

    c0_0, c0_1, c1_0, c1_1 = ab_v23[3][1:]
    ch2_v3 = models.Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1,
                                 name="ab_v23")
    c0_0, c0_1, c1_0, c1_1 = v23_ab[3][1:]
    ch2_b = models.Polynomial2D(2, c0_0=c0_0, c1_0=c1_0, c0_1=c0_1, c1_1=c1_1,
                                name="v23_ab")
    ch1_for =  ch1_v2 & ch1_v3
    ch2_for = ch2_v2 & ch2_v3
    ch1_for.inverse =  ch1_a & ch1_b
    ch2_for.inverse =  ch2_a & ch2_b
    m[channels[0]] = ch1_for
    m[channels[1]] = ch2_for
    tree['model'] = m

    f = AsdfFile()
    f.tree = tree
    f.write_to(name)
Code Example #16
File: nirspec.py Project: philhodge/jwst
def gwa_to_ifuslit(slits, disperser, wrange, order, reference_files):
    """
    GWA to SLIT transform.

    Parameters
    ----------
    slits : list
        A list of slit IDs for all IFU slits 0-29.
    disperser : dict
        A corrected disperser ASDF object.
    wrange : list
        Wavelength range for the grating/filter combination.
    order : int
        The spectral order.
    reference_files: dict
        Dictionary with reference files returned by CRDS.

    Returns
    -------
    model : `~jwst.transforms.Gwa2Slit` model.
        Transform from GWA frame to SLIT frame.
   """
    ymin = -.55
    ymax = .55
    agreq = AngleFromGratingEquation(disperser['groove_density'], order, name='alpha_from_greq')
    lgreq = WavelengthFromGratingEquation(disperser['groove_density'], order, name='lambda_from_greq')
    collimator2gwa = collimator_to_gwa(reference_files, disperser)
    mask = mask_slit(ymin, ymax)

    ifuslicer = AsdfFile.open(reference_files['ifuslicer'])
    ifupost = AsdfFile.open(reference_files['ifupost'])
    slit_models = {}
    ifuslicer_model = ifuslicer.tree['model']
    for slit in slits:
        slitdata = ifuslicer.tree['data'][slit]
        slitdata_model = get_slit_location_model(slitdata)
        ifuslicer_transform = (slitdata_model | ifuslicer_model)
        ifupost_transform = ifupost.tree[slit]['model']
        msa2gwa = ifuslicer_transform | ifupost_transform | collimator2gwa
        gwa2msa = gwa_to_ymsa(msa2gwa)# TODO: Use model sets here
        bgwa2msa = Mapping((0, 1, 0, 1), n_inputs=3) | \
                 Const1D(0) * Identity(1) & Const1D(-1) * Identity(1) & Identity(2) | \
                 Identity(1) & gwa2msa & Identity(2) | \
                 Mapping((0, 1, 0, 1, 2, 3)) | Identity(2) & msa2gwa & Identity(2) | \
                 Mapping((0, 1, 2, 5), n_inputs=7) | Identity(2) & lgreq | mask

        # msa to before_gwa
        #msa2bgwa = Mapping((0, 1, 2, 2)) | msa2gwa & Identity(1) | Mapping((3, 0, 1, 2)) | agreq
        msa2bgwa = msa2gwa & Identity(1) | Mapping((3, 0, 1, 2)) | agreq
        bgwa2msa.inverse = msa2bgwa
        slit_models[slit] = bgwa2msa

    ifuslicer.close()
    ifupost.close()
    return Gwa2Slit(slit_models)
Code Example #17
File: nirspec.py Project: hbushouse/jwst
def imaging(input_model, reference_files):
    """
    Imaging pipeline

    frames : detector, gwa, msa, sky
    """
    # Get the corrected disperser model
    disperser = get_disperser(input_model, reference_files['disperser'])

    # DETECTOR to GWA transform
    det2gwa = detector_to_gwa(reference_files, input_model.meta.instrument.detector, disperser)

    gwa_through = Const1D(-1) * Identity(1) & Const1D(-1) * Identity(1) & Identity(1)

    angles = [ disperser['theta_x'], disperser['theta_y'],
               disperser['theta_z'], disperser['tilt_y']]
    rotation = Rotation3DToGWA(angles, axes_order="xyzy", name='rotation').inverse
    dircos2unitless = DirCos2Unitless(name='directional_cosines2unitless')

    col = AsdfFile.open(reference_files['collimator']).tree['model']

    # Get the default spectral order and wavelength range and record them in the model.
    sporder, wrange = get_spectral_order_wrange(input_model, reference_files['wavelengthrange'])
    input_model.meta.wcsinfo.waverange_start = wrange[0]
    input_model.meta.wcsinfo.waverange_end = wrange[1]
    input_model.meta.wcsinfo.spectral_order = sporder

    lam = wrange[0] + (wrange[1] - wrange[0]) * .5

    lam_model = Mapping((0, 1, 1)) | Identity(2) & Const1D(lam)

    gwa2msa = gwa_through | rotation | dircos2unitless | col | lam_model
    gwa2msa.inverse = col.inverse | dircos2unitless.inverse | rotation.inverse | gwa_through

    # MSA to OTEIP transform
    msa2ote = msa_to_oteip(reference_files)
    msa2oteip = msa2ote | Mapping((0, 1), n_inputs=3)
    msa2oteip.inverse = Mapping((0, 1, 0, 1)) | msa2ote.inverse | Mapping((0, 1), n_inputs=3)
    # OTEIP to V2,V3 transform
    with AsdfFile.open(reference_files['ote']) as f:
        oteip2v23 = f.tree['model'].copy()

    # Create coordinate frames in the NIRSPEC WCS pipeline
    # "detector", "gwa", "msa", "oteip", "v2v3", "world"
    det, gwa, msa_frame, oteip, v2v3 = create_imaging_frames()

    imaging_pipeline = [(det, det2gwa),
                        (gwa, gwa2msa),
                        (msa_frame, msa2oteip),
                        (oteip, oteip2v23),
                        (v2v3, None)]

    return imaging_pipeline
Code Example #18
File: miri.py Project: stscieisenhamer/jwst
def imaging_distortion(input_model, reference_files):
    """
    Create the pixel2sky and sky2pixel transformations for the MIRI imager.

    Parameters
    ----------
    input_model : jwst.datamodels.ImagingModel
        The input data model.
    reference_files : dict
        reference files from CRDS


    using CDP 3 Reference distortion file
        Old one: ~~MIRI_FM_MIRIMAGE_F1000W_PSF_03.01.00.fits~~
    Current one: MIRI_FM_MIRIMAGE_DISTORTION_06.03.00.fits

    reference files/corrections needed (pixel to sky):

    1. Filter-dependent shift in (x,y) (with the opposite sign to that delivered by the IDT)
    2. Apply MI
    3. Apply Ai and BI matrices
    4. Apply the TI matrix (this gives V2/V3 coordinates)
    5. Apply V2/V3 to sky transformation

    ref_file: filter_offset.asdf - (1)
    ref_file: distortion.asdf -(2,3,4)
    """

    # Load the distortion and filter from the reference files.

    # Load in the distortion file.
    distortion = AsdfFile.open(reference_files['distortion']).tree['model']
    filter_offset = AsdfFile.open(reference_files['filteroffset']).tree[input_model.meta.instrument.filter]

    # Now apply each of the models.  The Scale(1/60) converts from arc-minutes to degrees.
    full_distortion = models.Shift(filter_offset['column_offset']) & models.Shift(
        filter_offset['row_offset']) | distortion | models.Scale(1/60) & models.Scale(1/60)


    # ToDo: This will likely have to change in the future, but the "filteroffset" file we have
    # ToDo: currently does not contain that key.
    filter_offset = None
    if input_model.meta.instrument.filter in  AsdfFile.open(reference_files['filteroffset']).tree:
        filter_offset = AsdfFile.open(reference_files['filteroffset']).tree[input_model.meta.instrument.filter]
        full_distortion = models.Shift(filter_offset['row_offset']) & models.Shift(
            filter_offset['column_offset']) | distortion
    else:
        full_distortion = distortion

    full_distortion = full_distortion.rename('distortion')

    return full_distortion
Code Example #19
File: miri_ifu_ref_tools.py Project: hbushouse/jwst
def create_distortion_file(reftype, detector,  band, channel, data, name):

    tree = create_reffile_header(reftype, detector, band, channel)

    adata, bdata, xdata, ydata, sdata1, sdata2 = data
    tree['alpha_model'] = adata
    tree['beta_model'] = bdata
    tree['x_model'] = xdata
    tree['y_model'] = ydata
    tree['slice_model'] = {str(channel[0])+band: sdata1, str(channel[1])+band: sdata2}
    f = AsdfFile()
    f.tree = tree
    f.write_to(name)
Code Example #20
File: plugin.py Project: spacetelescope/asdf
    def runtest(self):
        from asdf import AsdfFile, block, util
        from asdf.tests import helpers
        from .extension import TestExtension

        name, version = parse_schema_filename(self.filename)
        if should_skip(name, version):
            return

        standard_version = self._find_standard_version(name, version)

        # Make sure that the examples in the schema files (and thus the
        # ASDF standard document) are valid.
        buff = helpers.yaml_to_asdf(
            'example: ' + self.example.strip(), standard_version=standard_version)
        ff = AsdfFile(
            uri=util.filepath_to_url(os.path.abspath(self.filename)),
            extensions=TestExtension())

        # Fake an external file
        ff2 = AsdfFile({'data': np.empty((1024*1024*8), dtype=np.uint8)})

        ff._external_asdf_by_uri[
            util.filepath_to_url(
                os.path.abspath(
                    os.path.join(
                        os.path.dirname(self.filename), 'external.asdf')))] = ff2

        # Add some dummy blocks so that the ndarray examples work
        for i in range(3):
            b = block.Block(np.zeros((1024*1024*8), dtype=np.uint8))
            b._used = True
            ff.blocks.add(b)
        b._array_storage = "streamed"

        try:
            with pytest.warns(None) as w:
                import warnings
                ff._open_impl(ff, buff, mode='rw')
            # Do not tolerate any warnings that occur during schema validation
            assert len(w) == 0, helpers.display_warnings(w)
        except Exception:
            print("From file:", self.filename)
            raise

        # Just test we can write it out.  A roundtrip test
        # wouldn't always yield the correct result, so those have
        # to be covered by "real" unit tests.
        if b'external.asdf' not in buff.getvalue():
            buff = io.BytesIO()
            ff.write_to(buff)
Code Example #21
def fpa2asdf(fpafile, outname, ref_kw):
    """
    Create an asdf reference file with the FPA description.

    The CDP2 delivery includes a fits file - "FPA.fpa" which is the
    input to this function. This file is converted to asdf and is a
    reference file of type "FPA".

    nirspec_fs_ref_tools.fpa2asdf('Ref_Files/CoordTransform/Description/FPA.fpa', 'fpa.asdf', ref_kw)

    Parameters
    ----------
    fpafile : str
        A fits file with FPA description (FPA.fpa)
    outname : str
        Name of output ASDF file.
    """
    with open(fpafile) as f:
        lines = [l.strip() for l in f.readlines()]

    # NRS1
    ind = lines.index("*SCA491_PitchX")
    scalex_nrs1 = models.Scale(1/float(lines[ind+1]), name='fpa_scale_x')
    ind = lines.index("*SCA491_PitchY")
    scaley_nrs1 = models.Scale(1/float(lines[ind+1]), name='fpa_scale_y')
    ind = lines.index("*SCA491_RotAngle")
    rot_nrs1 = models.Rotation2D(np.rad2deg(-float(lines[ind+1])), name='fpa_rotation')
    ind = lines.index("*SCA491_PosX")
    shiftx_nrs1 = models.Shift(-float(lines[ind+1]), name='fpa_shift_x')
    ind = lines.index("*SCA491_PosY")
    shifty_nrs1 = models.Shift(-float(lines[ind+1]), name='fpa_shift_y')

    # NRS2
    ind = lines.index("*SCA492_PitchX")
    scalex_nrs2 = models.Scale(1/float(lines[ind+1]), name='fpa_scale_x')
    ind = lines.index("*SCA492_PitchY")
    scaley_nrs2 = models.Scale(1/float(lines[ind+1]), name='fpa_scale_y')
    ind = lines.index("*SCA492_RotAngle")
    rot_nrs2 = models.Rotation2D(np.rad2deg(float(lines[ind+1])), name='fpa_rotation')
    ind = lines.index("*SCA492_PosX")
    shiftx_nrs2 = models.Shift(-float(lines[ind+1]), name='fpa_shift_x')
    ind = lines.index("*SCA492_PosY")
    shifty_nrs2 = models.Shift(-float(lines[ind+1]), name='fpa_shift_y')
    tree = ref_kw.copy()
    tree['NRS1'] = (shiftx_nrs1 & shifty_nrs1) | rot_nrs1 | (scalex_nrs1 & scaley_nrs1)
    tree['NRS2'] = (shiftx_nrs2 & shifty_nrs2) | rot_nrs2 | (scalex_nrs2 & scaley_nrs2)
    fasdf = AsdfFile()
    fasdf.tree = tree
    fasdf.write_to(outname)
    return fasdf
Code Example #22
File: nirspec.py Project: philhodge/jwst
def slit_to_msa(slits_id, msafile):
    """
    The transform from slit_frame to msa_frame.

    Parameters
    ----------
    slits_id : list
        A list of slit IDs for all open shutters/slitlets.
    msafile : str
        The name of the msa reference file.

    Returns
    -------
    model : `~jwst.transforms.Slit2Msa` model.
        Transform from slit_frame to msa_frame.
    """
    msa = AsdfFile.open(msafile)
    models = {}
    for i in range(1, 6):
        slit_names = slits_id[slits_id[:, 0] == i]
        if slit_names.any():
            msa_model = msa.tree[i]['model']
            for slit in slit_names:
                index = slit[1] - 1
                slitdata = msa.tree[slit[0]]['data'][index]
                slitdata_model = get_slit_location_model(slitdata)
                msa_transform = slitdata_model | msa_model
                s = slitid_to_slit(np.array([slit]))[0]
                models[s] = msa_transform
    msa.close()
    return Slit2Msa(models)
Code Example #23
File: nirspec.py Project: philhodge/jwst
def collimator_to_gwa(reference_files, disperser):
    """
    Transform from COLLIMATOR to GWA frame.

    Parameters
    ----------
    reference_files: dict
        Dictionary with reference files returned by CRDS.
    disperser : dict
        A corrected disperser ASDF object.

    Returns
    -------
    model : `~astropy.modeling.core.Model` model.
        Transform from COLLIMATOR to GWA frame.

    """
    with AsdfFile.open(reference_files['collimator']) as f:
        collimator = f.tree['model'].copy()
    angles = [disperser['theta_x'], disperser['theta_y'],
              disperser['theta_z'], disperser['tilt_y']]
    rotation = Rotation3DToGWA(angles, axes_order="xyzy", name='rotation')
    u2dircos = Unitless2DirCos(name='unitless2directional_cosines')

    return collimator.inverse | u2dircos | rotation
Code Example #24
File: nirspec.py Project: sosey/jwst
def oteip_to_v23(reference_files):
    """
    Transform from the OTEIP frame to the V2V3 frame.

    Parameters
    ----------
    reference_files: dict
        Dictionary with reference files returned by CRDS.

    Returns
    -------
    model : `~astropy.modeling.core.Model` model.
        Transform from OTEIP to V2V3.

    """
    with AsdfFile.open(reference_files['ote']) as f:
        ote = f.tree['model'].copy()
    fore2ote_mapping = Identity(3, name='fore2ote_mapping')
    fore2ote_mapping.inverse = Mapping((0, 1, 2, 2))
    # Create the transform to v2/v3/lambda.  The wavelength units up to this point are
    # meters, as required by the pipeline, but the desired output wavelength units are microns.
    # So we scale the spectral axis by 1e6 (meters -> microns).
    # The spatial units are currently in deg. Converting to arcsec.
    oteip_to_xyan = fore2ote_mapping | (ote & Scale(1e6))
    # Add a shift for the aperture.
    oteip2v23 = oteip_to_xyan | Identity(1) & (Shift(468 / 3600) | Scale(-1)) & Identity(1)

    return oteip2v23
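
The unit bookkeeping in the comments can be sanity-checked numerically; a small sketch (values are illustrative):

from astropy.modeling.models import Scale, Shift

print(Scale(1e6)(2e-6))    # ~2.0: meters -> microns

# The XAN -> V3 leg: shift by 468 arcsec (expressed in deg), then flip the sign.
xan2v3 = Shift(468 / 3600) | Scale(-1)
print(xan2v3(0.0))         # -0.13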
Code Example #25
File: nirspec.py Project: sosey/jwst
def slit_to_msa(open_slits, msafile):
    """
    The transform from slit_frame to msa_frame.

    Parameters
    ----------
    open_slits : list
        A list of slit IDs for all open shutters/slitlets.
    msafile : str
        The name of the msa reference file.

    Returns
    -------
    model : `~jwst.transforms.Slit2Msa` model.
        Transform from slit_frame to msa_frame.
    """
    msa = AsdfFile.open(msafile)
    models = []
    for quadrant in range(1, 6):
        slits_in_quadrant = [s for s in open_slits if s.quadrant==quadrant]
        if any(slits_in_quadrant):
            msa_data = msa.tree[quadrant]['data']
            msa_model = msa.tree[quadrant]['model']
            for slit in slits_in_quadrant:
                slit_id = slit.shutter_id
                slitdata = msa_data[slit_id]
                slitdata_model = get_slit_location_model(slitdata)
                msa_transform = slitdata_model | msa_model
                models.append(msa_transform)
    msa.close()
    return Slit2Msa(open_slits, models)
Code Example #26
File: nirspec.py Project: philhodge/jwst
def get_spectral_order_wrange(input_model, wavelengthrange_file):
    """
    Read the spectral order and wavelength range from the reference file.

    Parameters
    ----------
    input_model : `jwst.datamodels.DataModel`
        The input data model.
    wavelengthrange_file : str
        Reference file of type "wavelengthrange".
    """
    full_range = [.6e-6, 5.3e-6]

    filter = input_model.meta.instrument.filter
    lamp = input_model.meta.instrument.lamp_state
    grating = input_model.meta.instrument.grating

    wave_range = AsdfFile.open(wavelengthrange_file)
    if filter == "OPAQUE":
        keyword = lamp + '_' + grating
    else:
        keyword = filter + '_' + grating
    try:
        order = wave_range.tree['filter_grating'][keyword]['order']
        wrange = wave_range.tree['filter_grating'][keyword]['range']
    except KeyError:
        order = -1
        wrange = full_range
        log.warning("Combination {0} missing in wavelengthrange file, setting order to -1 and range to {1}.".format(keyword, full_range))
    wave_range.close()
    return order, wrange
Code Example #27
File: miri.py Project: stscirij/jwst
def get_wavelength_range(input_model, path=None):
    """
    Return the wavelength range used for computing the WCS.

    Needs access to the reference file used to construct the WCS object.

    Parameters
    ----------
    input_model : `jwst.datamodels.ImagingModel`
        Data model after assign_wcs has been run.
    path : str
        Directory where the reference file is. (optional)
    """
    fname = input_model.meta.ref_file.wavelengthrange.name.split('/')[-1]
    if path is not None:
        fname = os.path.join(path, fname)
    elif not os.path.exists(fname):
        raise IOError("Reference file {0} not found. Please specify a path.".format(fname))
    f = AsdfFile.open(fname)

    wave_range = f.tree['wavelengthrange'].copy()
    wave_channels = f.tree['channels']
    f.close()

    wr = dict(zip(wave_channels, wave_range))
    channel = input_model.meta.instrument.channel
    band = input_model.meta.instrument.band

    return dict([(ch+band, wr[ch+band]) for ch in channel ])
Code Example #28
File: miri_ifu_ref_tools.py Project: hbushouse/jwst
def create_wavelengthrange_file(name):
    f = AsdfFile()
    #wavelengthrange = {'1SHORT': (4.88, 5.77),
                        #'1MEDIUM': (5.64, 6.67),
                        #'1LONG': (6.50, 7.70),
                        #'2SHORT': (7.47, 8.83),
                        #'2MEDIUM': (8.63, 10.19),
                        #'2LONG': (9.96, 11.77),
                        #'3SHORT': (11.49, 13.55),
                        #'3MEDIUM': (13.28, 15.66),
                        #'3LONG': (15.34, 18.09),
                        #'4SHORT': (17.60, 21.00),
                        #'4MEDIUM': (20.51, 24.48),
                        #'4LONG': (23.92, 28.55)
                        #}
    # Relaxing the range to match the distortion. The table above
    # comes from the report and is "as designed".
    wavelengthrange = {'1SHORT': (4.68, 5.97),
                        '1MEDIUM': (5.24, 6.87),
                        '1LONG': (6.2, 7.90),
                        '2SHORT': (7.27, 9.03),
                        '2MEDIUM': (8.43, 10.39),
                        '2LONG': (9.76, 11.97),
                        '3SHORT': (11.29, 13.75),
                        '3MEDIUM': (13.08, 15.86),
                        '3LONG': (15.14, 18.29),
                        '4SHORT': (17.40, 21.20),
                        '4MEDIUM': (20.31, 24.68),
                        '4LONG': (23.72, 28.75)
                        }
    channels = ['1SHORT', '1MEDIUM', '1LONG', '2SHORT', '2MEDIUM', '2LONG',
                '3SHORT', '3MEDIUM', '3LONG', '4SHORT', '4MEDIUM', '4LONG']
    tree = {
            "instrument": "MIRI",
            "exp_type": "MIR_MRS",
            "pedigree": "GROUND",
            "title": "MIRI IFU model - based on CDP-4",
            "reftype": "WAVELENGTHRANGE",
            "author": "N. Dencheva"
            }
    tree['channels'] = channels
    f.tree = tree
    vr = np.empty((12, 2), dtype=float)
    for i, ch in enumerate(channels):
        vr[i] = wavelengthrange[ch]
    f.tree['wavelengthrange'] = vr
    f.write_to(name)
Code Example #29
File: miri_imager_ref_tools.py Project: nden/jwst
def create_miri_imager_filter_offset(distfile, outname):
    """
    Create an asdf reference file with the filter offsets for the MIRI imager.

    Note: The IDT supplied distortion file lists sky to pixel as the
    forward transform. Since "forward" in the JWST pipeline is from
    pixel to sky, the offsets are taken with the opposite sign.

    Parameters
    ----------
    distfile : str
        MIRI imager DISTORTION file provided by the IDT team.
    outname : str
        Name of reference file to be written to disk.

    Returns
    -------
    fasdf : AsdfFile
        AsdfFile object

    Examples
    --------
    >>> create_miri_imager_filter_offset('MIRI_FM_MIRIMAGE_DISTORTION_03.02.00.fits',
    ...                                  'jwst_miri_filter_offset_0001.asdf')
    """

    with fits.open(distfile) as f:
        data = f[9].data

    d = dict.fromkeys(data.field('FILTER'))
    for i in data:
        d[i[0]] = {'column_offset': -i[1], 'row_offset': -i[2]}
    tree = {"title": "MIRI imager filter offset - CDP4",
            "reftype": "FILTEROFFSET",
            "instrument": "MIRI",
            "detector": "MIRIMAGE",
            "pedigree": "GROUND",
            "author": "N. Dencheva",
            "exp_type": "MIR_IMAGE"
            }
    tree.update(d)
    f = AsdfFile()
    f.tree = tree
    f.write_to(outname)
Code Example #30
File: segmentationmap.py Project: SKIRT/PTS
    def saveto(self, path, header=None):

        """
        This function ...
        :param path:
        :param header:
        :return:
        """

        # If a header is not specified, create it from the WCS
        if header is None: header = self.header

        # FITS format
        if path.endswith(".fits"):

            from .fits import write_frame  # Import here because io imports SegmentationMap

            # Write to a FITS file
            write_frame(self._data, header, path)

        # ASDF format
        elif path.endswith(".asdf"):

            # Import
            from asdf import AsdfFile

            # Create the tree
            tree = dict()

            tree["data"] = self._data
            tree["header"] = header

            # Create the asdf file
            ff = AsdfFile(tree)

            # Write
            ff.write_to(path)

        # Invalid
        else: raise ValueError("Only the FITS or ASDF filetypes are supported")

        # Update the path
        self.path = path
Code Example #31
File: container.py Project: stsci-hack/jwst
    def copy(self, memo=None):
        """
        Returns a deep copy of the models in this model container.
        """
        result = self.__class__(init=None,
                                pass_invalid_values=self._pass_invalid_values,
                                strict_validation=self._strict_validation)
        instance = copy.deepcopy(self._instance, memo=memo)
        result._asdf = AsdfFile(instance)
        result._instance = instance
        result._iscopy = self._iscopy
        result._schema = self._schema
        result._ctx = result
        for m in self._models:
            if isinstance(m, model_base.DataModel):
                result.append(m.copy())
            else:
                result.append(m)
        return result
Code Example #32
File: miri_ifu_ref_tools.py Project: rij/jwst
def create_wavelengthrange_file(name):
    f = AsdfFile()
    #wavelengthrange = {'1SHORT': (4.88, 5.77),
    #'1MEDIUM': (5.64, 6.67),
    #'1LONG': (6.50, 7.70),
    #'2SHORT': (7.47, 8.83),
    #'2MEDIUM': (8.63, 10.19),
    #'2LONG': (9.96, 11.77),
    #'3SHORT': (11.49, 13.55),
    #'3MEDIUM': (13.28, 15.66),
    #'3LONG': (15.34, 18.09),
    #'4SHORT': (17.60, 21.00),
    #'4MEDIUM': (20.51, 24.48),
    #'4LONG': (23.92, 28.55)
    #}
    # Relaxing the range to match the distortion. The table above
    # comes from the report and is "as designed".
    wavelengthrange = {
        '1SHORT': (4.68, 5.97),
        '1MEDIUM': (5.24, 6.87),
        '1LONG': (6.2, 7.90),
        '2SHORT': (7.27, 9.03),
        '2MEDIUM': (8.43, 10.39),
        '2LONG': (9.76, 11.97),
        '3SHORT': (11.29, 13.75),
        '3MEDIUM': (13.08, 15.86),
        '3LONG': (15.14, 18.29),
        '4SHORT': (17.40, 21.20),
        '4MEDIUM': (20.31, 24.68),
        '4LONG': (23.72, 28.75)
    }
    channels = [
        '1SHORT', '1MEDIUM', '1LONG', '2SHORT', '2MEDIUM', '2LONG', '3SHORT',
        '3MEDIUM', '3LONG', '4SHORT', '4MEDIUM', '4LONG'
    ]
    tree = {
        "instrument": "MIRI",
        "exp_type": "MIR_MRS",
        "pedigree": "GROUND",
        "title": "MIRI IFU model - based on CDP-4",
        "reftype": "WAVELENGTHRANGE",
        "author": "N. Dencheva"
    }
    tree['channels'] = channels
    f.tree = tree
    vr = np.empty((12, 2), dtype=float)
    for i, ch in enumerate(channels):
        vr[i] = wavelengthrange[ch]
    f.tree['wavelengthrange'] = vr
    f.write_to(name)
Code Example #33
File: container.py Project: stsci-hack/jwst
    def __init__(self, init=None, asn_exptypes=None, asn_n_members=None, **kwargs):

        super().__init__(init=None, asn_exptypes=None, **kwargs)

        self._models = []
        self.asn_exptypes = asn_exptypes
        self.asn_n_members = asn_n_members
        self._memmap = kwargs.get("memmap", False)

        if init is None:
            # Don't populate the container with models
            pass
        elif isinstance(init, fits.HDUList):
            self._models.append([datamodel_open(init, memmap=self._memmap)])
        elif isinstance(init, list):
            if all(isinstance(x, (str, fits.HDUList, model_base.DataModel)) for x in init):
                # Try opening the list as datamodels
                try:
                    init = [datamodel_open(m, memmap=self._memmap) for m in init]
                except (FileNotFoundError, ValueError):
                    raise
            else:
                raise TypeError("list must contain items that can be opened "
                                "with jwst.datamodels.open()")
            self._models = init
        elif isinstance(init, self.__class__):
            instance = copy.deepcopy(init._instance)
            self._schema = init._schema
            self._shape = init._shape
            self._asdf = AsdfFile(instance)
            self._instance = instance
            self._ctx = self
            self.__class__ = init.__class__
            self._models = init._models
        elif is_association(init):
            self.from_asn(init)
        elif isinstance(init, str):
            init_from_asn = self.read_asn(init)
            self.from_asn(init_from_asn, asn_file_path=init)
        else:
            raise TypeError('Input {0!r} is not a list of DataModels or '
                            'an ASN file'.format(init))
Code Example #34
File: model_base.py Project: Guang91/jwst
    def open_asdf(init=None,
                  ignore_version_mismatch=True,
                  ignore_unrecognized_tag=False,
                  **kwargs):
        """
        Open an asdf object from a filename or create a new asdf object
        """
        if isinstance(init, str):
            if s3_utils.is_s3_uri(init):
                init = s3_utils.get_object(init)
            asdffile = asdf.open(init,
                                 ignore_version_mismatch=ignore_version_mismatch,
                                 ignore_unrecognized_tag=ignore_unrecognized_tag)

        else:
            asdffile = AsdfFile(init,
                            ignore_version_mismatch=ignore_version_mismatch,
                            ignore_unrecognized_tag=ignore_unrecognized_tag
                            )
        return asdffile
Code Example #35
def msa_to_oteip(reference_files):
    """
    Transform from the MSA frame to the OTEIP frame.

    Parameters
    ----------
    reference_files: dict
        Dictionary with reference files returned by CRDS.

    Returns
    -------
    model : `~astropy.modeling.core.Model` model.
        Transform from MSA to OTEIP.

    """
    with AsdfFile.open(reference_files['fore']) as f:
        fore = f.tree['model'].copy()
    msa2fore_mapping = Mapping((0, 1, 2, 2), name='msa2fore_mapping')
    msa2fore_mapping.inverse = Identity(3)
    return msa2fore_mapping | (fore & Identity(1))
Code Example #36
File: test_transform.py Project: kristinelam/astropy
def test_window_orthopoly(tmpdir):
    model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5,
                                    domain=[-2, 2], window=[-0.5, 0.5])
    model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
                                    x_domain=[-2, 2], y_domain=[-2, 2],
                                    x_window=[-0.5, 0.5], y_window=[-0.1, 0.5])
    fa = AsdfFile()
    fa.tree['model1d'] = model1d
    fa.tree['model2d'] = model2d

    file_path = str(tmpdir.join('orthopoly_window.asdf'))
    fa.write_to(file_path)
    with asdf.open(file_path) as f:
        assert f.tree['model1d'](1.8) == model1d(1.8)
        assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
Code Example #37
def oteip_to_v23(reference_files):
    """
    Transform from the OTEIP frame to the V2V3 frame.

    Parameters
    ----------
    reference_files: dict
        Dictionary with reference files returned by CRDS.

    Returns
    -------
    model : `~astropy.modeling.core.Model` model.
        Transform from OTEIP to V2V3.

    """
    with AsdfFile.open(reference_files['ote']) as f:
        ote = f.tree['model'].copy()
    fore2ote_mapping = Identity(3, name='fore2ote_mapping')
    fore2ote_mapping.inverse = Mapping((0, 1, 2, 2))
    # Convert the wavelength to microns
    return fore2ote_mapping | (ote & Identity(1) / Const1D(1e-6))
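
The Identity(1) / Const1D(1e-6) term uses astropy.modeling's arithmetic operators: the wavelength input is divided by 1e-6, i.e. scaled from meters to microns. A standalone sketch:

from astropy.modeling.models import Const1D, Identity

to_microns = Identity(1) / Const1D(1e-6)
print(to_microns(2e-6))  # ~2.0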
Code Example #38
def get_disperser(input_model, disperserfile):
    """
    Return the disperser information corrected for the uncertainty in the GWA position.

    Parameters
    ----------
    input_model : `jwst_lib.models.DataModel`
        The input data model - either an ImageModel or a CubeModel.
    disperserfile : str
        The name of the disperser reference file.

    Returns
    -------
    disperser : dict
        The corrected disperser information.
    """
    with AsdfFile.open(disperserfile) as f:
        disperser = f.tree
    xtilt = input_model.meta.instrument.gwa_xtilt
    ytilt = input_model.meta.instrument.gwa_ytilt
    disperser = correct_tilt(disperser, xtilt, ytilt)
    return disperser
Code Example #39
File: model_base.py Project: zonca/jwst
    def open_asdf(init=None,
                  extensions=None,
                  ignore_version_mismatch=True,
                  ignore_unrecognized_tag=False,
                  **kwargs):
        """
        Open an asdf object from a filename or create a new asdf object
        """
        if isinstance(init, str):
            asdffile = asdf.open(
                init,
                extensions=extensions,
                ignore_version_mismatch=ignore_version_mismatch,
                ignore_unrecognized_tag=ignore_unrecognized_tag)

        else:
            asdffile = AsdfFile(
                init,
                extensions=extensions,
                ignore_version_mismatch=ignore_version_mismatch,
                ignore_unrecognized_tag=ignore_unrecognized_tag)
        return asdffile
Code Example #40
File: schema_tester.py Project: LandingEllipse/asdf
    def runtest(self):
        standard_version = self._find_standard_version()

        # Make sure that the examples in the schema files (and thus the
        # ASDF standard document) are valid.
        buff = helpers.yaml_to_asdf('example: ' + self.example.strip(),
                                    standard_version=standard_version)
        ff = AsdfFile(uri=util.filepath_to_url(os.path.abspath(self.filename)),
                      extensions=TestExtension())

        # Fake an external file
        ff2 = AsdfFile({'data': np.empty((1024 * 1024 * 8), dtype=np.uint8)})

        ff._external_asdf_by_uri[util.filepath_to_url(
            os.path.abspath(
                os.path.join(os.path.dirname(self.filename),
                             'external.asdf')))] = ff2

        # Add some dummy blocks so that the ndarray examples work
        for i in range(3):
            b = block.Block(np.zeros((1024 * 1024 * 8), dtype=np.uint8))
            b._used = True
            ff.blocks.add(b)
        b._array_storage = "streamed"

        try:
            with pytest.warns(None) as w:
                ff._open_impl(ff, buff)
            # Do not tolerate any warnings that occur during schema validation
            assert len(w) == 0, helpers.display_warnings(w)
        except Exception:
            print("From file:", self.filename)
            raise

        # Just test we can write it out.  A roundtrip test
        # wouldn't always yield the correct result, so those have
        # to be covered by "real" unit tests.
        if b'external.asdf' not in buff.getvalue():
            buff = io.BytesIO()
            ff.write_to(buff)
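`pytest.warns(None)` as used above is deprecated in pytest 7 and later; a sketch of the standard-library replacement for the same assert-no-warnings idiom:

import warnings

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")  # record every warning that fires
    result = 1 + 1                   # the operation under test goes here
assert len(w) == 0, [str(item.message) for item in w]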
Code example #41
def create_distortion_file(reftype, detector, band, channel, data, name,
                           author, useafter, description):

    description = 'MIRI MRS Distortion Maps'
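    # Note: the fixed string above replaces whatever description was passed in.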
    tree = create_reffile_header(reftype, detector, band, channel, author,
                                 useafter, description)
    tree['filename'] = name
    adata, bdata, xdata, ydata, bzero, bdel = data
    tree['alpha_model'] = adata
    tree['beta_model'] = bdata
    tree['x_model'] = xdata
    tree['y_model'] = ydata
    tree['bzero'] = bzero
    tree['bdel'] = bdel

    f = AsdfFile()
    f.tree = tree
    f.add_history_entry(
        "DOCUMENT: MIRI-TN-00001-ETH; SOFTWARE: polyd2c_CDP5.pro; DATA USED: Data set of: - FM Test Campaign relevant to MRS-OPT-01, MRS-OPT-02, MRS-OPT-04, MRS-OPT-08; - CV1 Test Campaign relevant to MRS-OPT-02; - CV2 Test Campaign relevant to MRS-OPT-02; - Laboratory measurement of SPO; ============ DIFFERENCES: - New file structure: Change of Extention names and Table Column Headers.; - Replaced V2/V3 with XAN/YAN;"
    )
    f.write_to(name)
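The write path above, reduced to a self-contained round trip (file name and tree contents are illustrative; `get_history_entries` is assumed from recent asdf releases):

import asdf

tree = {"filename": "demo.asdf", "bzero": 0.0, "bdel": 0.177}
af = asdf.AsdfFile(tree)
af.add_history_entry("Created as a documentation example")
af.write_to("demo.asdf")

with asdf.open("demo.asdf") as f:
    assert f.tree["bdel"] == 0.177
    print(f.get_history_entries())  # shows the entry added above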
Code example #42
File: container.py Project: optimux/jwst
    def __init__(self, init=None, **kwargs):

        super(ModelContainer, self).__init__(init=None, **kwargs)

        self._models = []

        if init is None:
            # Don't populate the container with models
            pass
        elif isinstance(init, fits.HDUList):
            self._models.append(datamodel_open(init))
        elif isinstance(init, list):
            if all(isinstance(x, (str, fits.HDUList)) for x in init):
                # Try opening the list of files as datamodels
                init = [datamodel_open(m) for m in init]
            elif not all(isinstance(x, model_base.DataModel) for x in init):
                raise TypeError('list must contain DataModels')
            self._models = init
        elif isinstance(init, self.__class__):
            instance = copy.deepcopy(init._instance)
            self._schema = init._schema
            self._shape = init._shape
            self._asdf = AsdfFile(instance)
            self._instance = instance
            self._ctx = self
            self.__class__ = init.__class__
            self._models = init._models
        elif is_association(init):
            self.from_asn(init)
        elif isinstance(init, str):
            init_from_asn = self.read_asn(init)
            self.from_asn(init_from_asn, asn_file_path=init)
        else:
            raise TypeError('Input {0!r} is not a list of DataModels or '
                            'an ASN file'.format(init))
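Stripped of the jwst specifics, the constructor above is a type-dispatch initializer: each accepted input type maps to exactly one initialization path. A generic, runnable sketch of the pattern:

class Container:
    def __init__(self, init=None):
        self._models = []
        if init is None:
            pass                               # empty container
        elif isinstance(init, list):
            self._models = list(init)          # shallow-copy the list
        elif isinstance(init, Container):
            self._models = list(init._models)  # copy another container
        else:
            raise TypeError('Input {0!r} is not supported'.format(init))

assert Container([1, 2])._models == [1, 2]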
Code example #43
def create_wavelengthrange_file(name, detector, author, useafter, description,
                                outformat):
    f = AsdfFile()

    # Relaxing the range to match the distortion. The table above
    # comes from the report and is "as designed".
    wavelengthrange = {
        '1SHORT': (4.68, 5.97),
        '1MEDIUM': (5.24, 6.87),
        '1LONG': (6.2, 7.90),
        '2SHORT': (7.27, 9.03),
        '2MEDIUM': (8.43, 10.39),
        '2LONG': (9.76, 11.97),
        '3SHORT': (11.29, 13.75),
        '3MEDIUM': (13.08, 15.86),
        '3LONG': (15.14, 18.29),
        '4SHORT': (17.40, 21.20),
        '4MEDIUM': (20.31, 24.68),
        '4LONG': (23.72, 28.75)
    }
    channels = [
        '1SHORT', '1MEDIUM', '1LONG', '2SHORT', '2MEDIUM', '2LONG', '3SHORT',
        '3MEDIUM', '3LONG', '4SHORT', '4MEDIUM', '4LONG'
    ]

    tree = create_reffile_header("WAVELENGTHRANGE",
                                 detector,
                                 band="N/A",
                                 channel="N/A",
                                 author=author,
                                 useafter=useafter,
                                 description=description)
    tree['filename'] = name
    tree['author'] = 'David Law'
    tree['detector'] = "N/A"
    tree['channels'] = channels

    f.tree = tree
    vr = np.empty((12, 2), dtype=np.float64)
    for i, ch in enumerate(channels):
        vr[i] = wavelengthrange[ch]
    f.tree['wavelengthrange'] = vr
    #    f.add_history_entry("DOCUMENT: MIRI-TN-00001-ETH; SOFTWARE: polyd2c_CDP5.pro; DATA USED: Data set of: - FM Test Campaign relevant to MRS-OPT-01, MRS-OPT-02, MRS-OPT-04, MRS-OPT-08; - CV1 Test Campaign relevant to MRS-OPT-02; - CV2 Test Campaign relevant to MRS-OPT-02; - Laboratory measurement of SPO; ============ DIFFERENCES: - New file structure: Change of Extention names and Table Column Headers.; - Replaced V2/V3 with XAN/YAN;")
    f.write_to(name)  #,all_array_storage=outformat)
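Reading the stored (12, 2) array back into a {channel: (min, max)} mapping is a short dictionary comprehension; a sketch with the first three channels of the table above:

import numpy as np

channels = ['1SHORT', '1MEDIUM', '1LONG']  # truncated for brevity
vr = np.array([(4.68, 5.97), (5.24, 6.87), (6.2, 7.90)])
wavelengthrange = {ch: tuple(vr[i]) for i, ch in enumerate(channels)}
assert wavelengthrange['1SHORT'] == (4.68, 5.97)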
Code example #44
def msa2asdf(msafile, outname, ref_kw):
    """
    Create an asdf reference file with the MSA description.

    msa2asdf("MSA.msa", "msa.asdf")

    Parameters
    ----------
    msafile : str
        A fits file with MSA description (MSA.msa)
    outname : str
        Name of output ASDF file.
    ref_kw : dict
        Common reference file keywords; copied to seed the output tree.
    """
    f = fits.open(msafile)
    tree = ref_kw.copy()
    data = f[5].data  # SLITS and IFU
    header = f[5].header
    shiftx = models.Shift(header['SLITXREF'], name='slit_xref')
    shifty = models.Shift(header['SLITYREF'], name='slit_yref')
    slitrot = models.Rotation2D(header['SLITROT'], name='slit_rot')

    tree[5] = {}
    tree[5]['model'] = slitrot | shiftx & shifty
    tree[5]['data'] = f[5].data
    for i in range(1, 5):
        header = f[i].header
        shiftx = models.Shift(header['QUADXREF'], name='msa_xref')
        shifty = models.Shift(header['QUADYREF'], name='msa_yref')
        slitrot = models.Rotation2D(header['QUADROT'], name='msa_rot')
        tree[i] = {}
        tree[i]['model'] = slitrot | shiftx & shifty
        tree[i]['data'] = f[i].data

    f.close()
    fasdf = AsdfFile()
    fasdf.tree = tree
    fasdf.add_history_entry("Build 6")
    fasdf.write_to(outname)
    return fasdf
Code example #45
def oteip_to_v23(reference_files):
    """
    Transform from the OTEIP frame to the V2V3 frame.

    Parameters
    ----------
    reference_files : dict
        Dictionary with reference files returned by CRDS.

    Returns
    -------
    model : `~astropy.modeling.core.Model`
        Transform from OTEIP to V2V3.

    """
    with AsdfFile.open(reference_files['ote']) as f:
        ote = f.tree['model'].copy()
    fore2ote_mapping = Identity(3, name='fore2ote_mapping')
    fore2ote_mapping.inverse = Mapping((0, 1, 2, 2))

    # Create the transform to v2/v3/lambda.  The wavelength units up to this
    # point are meters, as required by the pipeline, but the desired output
    # units are microns, so the spectral axis is scaled by 1e6 (m -> um).
    return fore2ote_mapping | (ote & Scale(1e6))
Code example #46
File: model_base.py Project: nden/stdatamodels
class DataModel(properties.ObjectNode, ndmodel.NDModel):
    """
    Base class of all of the data models.
    """

    schema_url = None
    """
    The schema URI to validate the model against.  If
    None, only basic validation of required metadata
    properties (filename, date, model_type) will occur.
    """
    def __init__(self,
                 init=None,
                 schema=None,
                 memmap=False,
                 pass_invalid_values=None,
                 strict_validation=None,
                 ignore_missing_extensions=True,
                 **kwargs):
        """
        Parameters
        ----------
        init : str, tuple, `~astropy.io.fits.HDUList`, ndarray, dict, None

            - None : Create a default data model with no shape.

            - tuple : Shape of the data array.
              Initialize with an empty data array of the given shape.

            - file path: Initialize from the given file (FITS or ASDF)

            - readable file object: Initialize from the given file
              object

            - `~astropy.io.fits.HDUList` : Initialize from the given
              `~astropy.io.fits.HDUList`.

            - A numpy array: Used to initialize the data array

            - dict: The object model tree for the data model

        schema : dict, str (optional)
            Tree of objects representing a JSON schema, or string naming a schema.
            The schema to use to understand the elements on the model.
            If not provided, the schema associated with this class
            will be used.

        memmap : bool
            Turn memmap of FITS file on or off.  (default: False).  Ignored for
            ASDF files.

        pass_invalid_values : bool or None
            If `True`, values that do not validate the schema
            will be added to the metadata. If `False`, they will be set to `None`.
            If `None`, the value is taken from the PASS_INVALID_VALUES environment variable.
            Otherwise the default value is `False`.

        strict_validation : bool or None
            If `True`, schema validation errors will generate
            an exception. If `False`, they will generate a warning.
            If `None`, the value is taken from the STRICT_VALIDATION environment variable.
            Otherwise, the default value is `False`.

        ignore_missing_extensions : bool
            When `False`, raise warnings when a file is read that
            contains metadata about extensions that are not available.
            Defaults to `True`.

        kwargs : dict
            Additional keyword arguments passed to lower level functions. These arguments
            are generally file format-specific. Arguments of note are:

            - FITS

              skip_fits_update - bool or None
                  `True` to skip updating the ASDF tree from the FITS headers, if possible.
                  If `None`, the value is taken from the SKIP_FITS_UPDATE environment variable.
                  Otherwise, the default value is `True`.
        """

        # Override value of validation parameters if not explicitly set.
        if pass_invalid_values is None:
            pass_invalid_values = get_envar_as_boolean("PASS_INVALID_VALUES",
                                                       False)
        self._pass_invalid_values = pass_invalid_values
        if strict_validation is None:
            strict_validation = get_envar_as_boolean("STRICT_VALIDATION",
                                                     False)
        self._strict_validation = strict_validation
        self._ignore_missing_extensions = ignore_missing_extensions

        kwargs.update({'ignore_missing_extensions': ignore_missing_extensions})

        # Load the schema files
        if schema is None:
            if self.schema_url is None:
                schema = _DEFAULT_SCHEMA
            else:
                # Create an AsdfFile so we can use its resolver for loading schemas
                schema = asdf_schema.load_schema(self.schema_url,
                                                 resolve_references=True)

        self._schema = mschema.merge_property_trees(schema)

        # Provide the object as context to other classes and functions
        self._ctx = self

        # Initialize with an empty AsdfFile instance as this is needed for
        # reading in FITS files where validate._check_value() gets called, and
        # ctx needs to have an _asdf attribute.
        self._asdf = AsdfFile()

        # Determine what kind of input we have (init) and execute the
        # proper code to initialize the model
        self._files_to_close = []
        self._iscopy = False
        is_array = False
        is_shape = False
        shape = None

        if init is None:
            asdffile = self.open_asdf(init=None, **kwargs)

        elif isinstance(init, dict):
            asdffile = self.open_asdf(init=init, **kwargs)

        elif isinstance(init, np.ndarray):
            asdffile = self.open_asdf(init=None, **kwargs)

            shape = init.shape
            is_array = True

        elif isinstance(init, tuple):
            for item in init:
                if not isinstance(item, int):
                    raise ValueError("shape must be a tuple of ints")

            shape = init
            is_shape = True
            asdffile = self.open_asdf(init=None, **kwargs)

        elif isinstance(init, DataModel):
            asdffile = None
            self.clone(self, init)
            if not isinstance(init, self.__class__):
                self.validate()
            return

        elif isinstance(init, AsdfFile):
            asdffile = init

        elif isinstance(init, fits.HDUList):
            asdffile = fits_support.from_fits(init, self._schema, self._ctx,
                                              **kwargs)

        elif isinstance(init, (str, bytes, PurePath)):
            if isinstance(init, PurePath):
                init = str(init)
            if isinstance(init, bytes):
                init = init.decode(sys.getfilesystemencoding())
            file_type = filetype.check(init)

            if file_type == "fits":
                if s3_utils.is_s3_uri(init):
                    init_fitsopen = s3_utils.get_object(init)
                    memmap = None
                else:
                    init_fitsopen = init

                hdulist = fits.open(init_fitsopen, memmap=memmap)
                asdffile = fits_support.from_fits(hdulist, self._schema,
                                                  self._ctx, **kwargs)
                self._files_to_close.append(hdulist)

            elif file_type == "asdf":
                asdffile = self.open_asdf(init=init, **kwargs)

            else:
                # TODO handle json files as well
                raise IOError(
                    "File does not appear to be a FITS or ASDF file.")

        else:
            raise ValueError("Can't initialize datamodel using {0}".format(
                str(type(init))))

        # Initialize object fields as determined from the code above
        self._shape = shape
        self._instance = asdffile.tree
        self._asdf = asdffile

        # Initialize class-dependent hidden fields
        self._no_asdf_extension = False

        # Instantiate the primary array of the image
        if is_array:
            primary_array_name = self.get_primary_array_name()
            if not primary_array_name:
                raise TypeError(
                    "Array passed to DataModel.__init__, but model has "
                    "no primary array in its schema")
            setattr(self, primary_array_name, init)

        # If a shape has been given, initialize the primary array.
        if is_shape:
            primary_array_name = self.get_primary_array_name()
            if not primary_array_name:
                raise TypeError(
                    "Shape passed to DataModel.__init__, but model has "
                    "no primary array in its schema")

            # Initialization occurs when the primary array is first
            # referenced. Do so now.
            getattr(self, primary_array_name)

        # if the input is from a file, set the filename attribute
        if isinstance(init, str):
            self.meta.filename = os.path.basename(init)
        elif isinstance(init, fits.HDUList):
            info = init.fileinfo(0)
            if info is not None:
                filename = info.get('filename')
                if filename is not None:
                    self.meta.filename = os.path.basename(filename)

        # if the input model doesn't have a date set, use the current date/time
        if not self.meta.hasattr('date'):
            current_date = Time(datetime.datetime.now())
            current_date.format = 'isot'
            self.meta.date = current_date.value

        # store the data model type, if not already set
        klass = self.__class__.__name__
        if klass != 'DataModel':
            if not self.meta.hasattr('model_type'):
                self.meta.model_type = klass

        # initialize arrays from keyword arguments when they are present

        for attr, value in kwargs.items():
            if value is not None:
                subschema = properties._get_schema_for_property(
                    self._schema, attr)
                if 'datatype' in subschema:
                    setattr(self, attr, value)

    @property
    def _model_type(self):
        return self.__class__.__name__

    def __repr__(self):
        buf = ['<']
        buf.append(self._model_type)

        if self.shape:
            buf.append(str(self.shape))

        try:
            filename = self.meta.filename
        except AttributeError:
            filename = None
        if filename:
            buf.append(" from ")
            buf.append(filename)
        buf.append('>')

        return "".join(buf)

    def __del__(self):
        """Ensure closure of resources when deleted."""
        self.close()

    @property
    def override_handle(self):
        """override_handle identifies in-memory models where a filepath
        would normally be used.
        """
        # Arbitrary choice to look something like crds://
        return "override://" + self.__class__.__name__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def _drop_arrays(self):
        def _drop_array(d):
            # Walk tree and delete numpy arrays
            if isinstance(d, dict):
                for val in d.values():
                    _drop_array(val)
            elif isinstance(d, list):
                for val in d:
                    _drop_array(val)
            elif isinstance(d, np.ndarray):
                del d
            else:
                pass

        _drop_array(self._instance)

    def close(self):
        if not self._iscopy:
            if self._asdf is not None:
                self._asdf.close()
                self._drop_arrays()

            for fd in self._files_to_close:
                if fd is not None:
                    fd.close()

    @staticmethod
    def clone(target, source, deepcopy=False, memo=None):
        if deepcopy:
            instance = copy.deepcopy(source._instance, memo=memo)
            target._asdf = AsdfFile(instance)
            target._instance = instance
            target._iscopy = source._iscopy
        else:
            target._asdf = source._asdf
            target._instance = source._instance
            target._iscopy = True

        target._files_to_close = []
        target._shape = source._shape
        target._ctx = target
        target._no_asdf_extension = source._no_asdf_extension

    def copy(self, memo=None):
        """
        Returns a deep copy of this model.
        """
        result = self.__class__(init=None,
                                pass_invalid_values=self._pass_invalid_values,
                                strict_validation=self._strict_validation)
        self.clone(result, self, deepcopy=True, memo=memo)
        return result

    __copy__ = __deepcopy__ = copy

    def validate(self):
        """
        Re-validate the model instance against its schema
        """
        validate.value_change(str(self), self._instance, self._schema, self)

    def validate_required_fields(self):
        """
        Walk the schema and make sure all required fields are
        in the model
        """
        def callback(schema, path, combiner, ctx, recurse):
            if 'fits_required' not in schema:
                return

            # Get the value pointed at by the path to the node,
            # or None in case there is no entry for the node

            node = ctx
            for attr in path:
                node = getattr(node, attr)
                if node is None:
                    break

            validate.value_change(path, node, schema, self)

        mschema.walk_schema(self._schema, callback, ctx=self)

    def info(self, *args, **kwargs):
        return self._asdf.info(*args, **kwargs)

    def search(self, *args, **kwargs):
        return self._asdf.search(*args, **kwargs)

    try:
        info.__doc__ = AsdfFile.info.__doc__
        search.__doc__ = AsdfFile.search.__doc__
    except AttributeError:
        pass

    def get_primary_array_name(self):
        """
        Returns the name "primary" array for this model, which
        controls the size of other arrays that are implicitly created.
        This is intended to be overridden in the subclasses if the
        primary array's name is not "data".
        """
        if properties._find_property(self._schema, 'data'):
            primary_array_name = 'data'
        else:
            primary_array_name = ''
        return primary_array_name

    def on_save(self, path=None):
        """
        This is a hook that is called just before saving the file.
        It can be used, for example, to update values in the metadata
        that are based on the content of the data.

        Override it in the subclass to make it do something, but don't
        forget to "chain up" to the base class, since it does things
        there, too.

        Parameters
        ----------
        path : str
            The path to the file that we're about to save to.
        """
        if isinstance(path, str):
            self.meta.filename = os.path.basename(path)

        current_date = Time(datetime.datetime.now())
        current_date.format = 'isot'
        self.meta.date = current_date.value

        # Enforce model_type to be the actual type of model being saved.
        self.meta.model_type = self._model_type

    def save(self, path, dir_path=None, *args, **kwargs):
        """
        Save to either a FITS or ASDF file, depending on the path.

        Parameters
        ----------
        path : string or func
            File path to save to.
            If a function, it takes one argument, which is
            model.meta.filename, and returns the full path string.

        dir_path: string
            Directory to save to. If not None, this will override
            any directory information in the `path`

        Returns
        -------
        output_path: str
            The file path the model was saved in.
        """
        if callable(path):
            path_head, path_tail = os.path.split(path(self.meta.filename))
        else:
            path_head, path_tail = os.path.split(path)
        base, ext = os.path.splitext(path_tail)
        if isinstance(ext, bytes):
            ext = ext.decode(sys.getfilesystemencoding())

        if dir_path:
            path_head = dir_path
        output_path = os.path.join(path_head, path_tail)

        # TODO: Support gzip-compressed fits
        if ext == '.fits':
            # TODO: remove 'clobber' check once fully deprecated in astropy
            if 'clobber' not in kwargs:
                kwargs.setdefault('overwrite', True)
            self.to_fits(output_path, *args, **kwargs)
        elif ext == '.asdf':
            self.to_asdf(output_path, *args, **kwargs)
        else:
            raise ValueError("unknown filetype {0}".format(ext))

        return output_path

    @staticmethod
    def open_asdf(init=None,
                  ignore_version_mismatch=True,
                  ignore_unrecognized_tag=False,
                  **kwargs):
        """
        Open an asdf object from a filename or create a new asdf object
        """
        if isinstance(init, str):
            if s3_utils.is_s3_uri(init):
                init = s3_utils.get_object(init)
            asdffile = asdf.open(
                init,
                ignore_version_mismatch=ignore_version_mismatch,
                ignore_unrecognized_tag=ignore_unrecognized_tag)

        else:
            asdffile = AsdfFile(
                init,
                ignore_version_mismatch=ignore_version_mismatch,
                ignore_unrecognized_tag=ignore_unrecognized_tag)
        return asdffile

    @classmethod
    def from_asdf(cls, init, schema=None, **kwargs):
        """
        Load a data model from an ASDF file.

        Parameters
        ----------
        init : str, file object, `~asdf.AsdfFile`
            - str : file path: initialize from the given file
            - readable file object: Initialize from the given file object
            - `~asdf.AsdfFile` : Initialize from the given `~asdf.AsdfFile`.
        schema :
            Same as for `__init__`
        kwargs : dict
            Additional arguments passed to lower level functions

        Returns
        -------
        model : `~jwst.datamodels.DataModel` instance
            A data model.
        """
        return cls(init, schema=schema, **kwargs)

    def to_asdf(self, init, *args, **kwargs):
        """
        Write a data model to an ASDF file.

        Parameters
        ----------
        init : file path or file object
        args : tuple, list
            Additional positional arguments passed to `~asdf.AsdfFile.write_to`.
        kwargs : dict
            Any additional keyword arguments are passed along to
            `~asdf.AsdfFile.write_to`.
        """
        self.on_save(init)
        asdffile = self.open_asdf(self._instance, **kwargs)
        asdffile.write_to(init, *args, **kwargs)

    @classmethod
    def from_fits(cls, init, schema=None, **kwargs):
        """
        Load a model from a FITS file.

        Parameters
        ----------
        init : file path, file object, astropy.io.fits.HDUList
            - file path: Initialize from the given file
            - readable file object: Initialize from the given file object
            - astropy.io.fits.HDUList: Initialize from the given
              `~astropy.io.fits.HDUList`.

        schema : dict, str
            Same as for `__init__`

        kwargs : dict
            Additional arguments passed to lower level functions.

        Returns
        -------
        model : `~jwst.datamodels.DataModel`
            A data model.
        """
        return cls(init, schema=schema, **kwargs)

    def to_fits(self, init, *args, **kwargs):
        """
        Write a data model to a FITS file.

        Parameters
        ----------
        init : file path or file object

        args, kwargs
            Any additional arguments are passed along to
            `astropy.io.fits.writeto`.
        """
        self.on_save(init)

        with fits_support.to_fits(self._instance, self._schema) as ff:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', message='Card is too long')
                if self._no_asdf_extension:
                    ff._hdulist.writeto(init, *args, **kwargs)
                else:
                    ff.write_to(init, *args, **kwargs)

    @property
    def shape(self):
        if self._shape is None:
            primary_array_name = self.get_primary_array_name()
            if primary_array_name and self.hasattr(primary_array_name):
                primary_array = getattr(self, primary_array_name)
                self._shape = primary_array.shape
        return self._shape

    def my_attribute(self, attr):
        properties = frozenset(("shape", "history", "_extra_fits", "schema"))
        return attr in properties

    def __setattr__(self, attr, value):
        if self.my_attribute(attr):
            object.__setattr__(self, attr, value)
        elif ndmodel.NDModel.my_attribute(self, attr):
            ndmodel.NDModel.__setattr__(self, attr, value)
        else:
            properties.ObjectNode.__setattr__(self, attr, value)

    def extend_schema(self, new_schema):
        """
        Extend the model's schema using the given schema, by combining
        it in an "allOf" array.

        Parameters
        ----------
        new_schema : dict
            Schema tree.
        """
        schema = {'allOf': [self._schema, new_schema]}
        self._schema = mschema.merge_property_trees(schema)
        self.validate()
        return self

    def add_schema_entry(self, position, new_schema):
        """
        Extend the model's schema by placing the given new_schema at
        the given dot-separated position in the tree.

        Parameters
        ----------
        position : str
            Dot separated string indicating the position, e.g. ``meta.instrument.name``.
        new_schema : dict
            Schema tree.
        """
        parts = position.split('.')
        schema = new_schema
        for part in parts[::-1]:
            schema = {'type': 'object', 'properties': {part: schema}}
        return self.extend_schema(schema)

    # return_result retained for backward compatibility
    def find_fits_keyword(self, keyword, return_result=True):
        """
        Utility function to find a reference to a FITS keyword in this
        model's schema.  This is intended for interactive use, and not
        for use within library code.

        Parameters
        ----------
        keyword : str
            A FITS keyword name.

        Returns
        -------
        locations : list of str
            If `return_result` is `True`, a list of the locations in
            the schema where this FITS keyword is used.  Each element
            is a dot-separated path.
        """
        from . import schema
        return schema.find_fits_keyword(self.schema, keyword)

    def search_schema(self, substring):
        """
        Utility function to search the metadata schema for a
        particular phrase.

        This is intended for interactive use, and not for use within
        library code.

        The searching is case insensitive.

        Parameters
        ----------
        substring : str
            The substring to search for.

        Returns
        -------
        locations : list of tuples
        """
        from . import schema
        return schema.search_schema(self.schema, substring)

    def __getitem__(self, key):
        """
        Get a metadata value using a dotted name.
        """
        assert isinstance(key, str)
        meta = self
        for part in key.split('.'):
            try:
                meta = getattr(meta, part)
            except AttributeError:
                raise KeyError(repr(key))
        return meta

    def get_item_as_json_value(self, key):
        """
        Equivalent to __getitem__, except returns the value as a JSON
        basic type, rather than an arbitrary Python type.
        """
        assert isinstance(key, str)
        meta = self
        parts = key.split('.')
        for part in parts:
            try:
                meta = getattr(meta, part)
            except AttributeError:
                raise KeyError(repr(key))
        return yamlutil.custom_tree_to_tagged_tree(meta, self._instance)

    def __setitem__(self, key, value):
        """
        Set a metadata value using a dotted name.
        """
        assert isinstance(key, str)
        meta = self
        parts = key.split('.')
        for part in parts[:-1]:
            try:
                part = int(part)
            except ValueError:
                try:
                    meta = getattr(meta, part)
                except AttributeError:
                    raise KeyError(repr(key))
            else:
                meta = meta[part]

        part = parts[-1]
        try:
            part = int(part)
        except ValueError:
            setattr(meta, part, value)
        else:
            meta[part] = value

    def iteritems(self):
        """
        Iterates over all of the schema items in a flat way.

        Each element is a pair (`key`, `value`).  Each `key` is a
        dot-separated name.  For example, the schema element
        `meta.observation.date` will end up in the result as::

            ("meta.observation.date": "2012-04-22T03:22:05.432")
        """
        def recurse(tree, path=[]):
            if isinstance(tree, dict):
                for key, val in tree.items():
                    for x in recurse(val, path + [key]):
                        yield x
            elif isinstance(tree, (list, tuple)):
                for i, val in enumerate(tree):
                    for x in recurse(val, path + [i]):
                        yield x
            elif tree is not None:
                yield ('.'.join(str(x) for x in path), tree)

        for x in recurse(self._instance):
            yield x

    # Define ``items`` as an alias of ``iteritems``
    items = iteritems

    def iterkeys(self):
        """
        Iterates over all of the schema keys in a flat way.

        Each result of the iterator is a `key`.  Each `key` is a
        dot-separated name.  For example, the schema element
        `meta.observation.date` will end up in the result as the
        string `"meta.observation.date"`.
        """
        for key, val in self.iteritems():
            yield key

    keys = iterkeys

    def itervalues(self):
        """
        Iterates over all of the schema values in a flat way.
        """
        for key, val in self.iteritems():
            yield val

    values = itervalues

    def update(self, d, only=None, extra_fits=False):
        """
        Updates this model with the metadata elements from another model.

        Note: The ``update`` method skips a WCS object, if present.

        Parameters
        ----------
        d : `~jwst.datamodels.DataModel` or dictionary-like object
            The model to copy the metadata elements from. Can also be a
            dictionary or dictionary of dictionaries or lists.
        only: str, None
            Update only the named hdu, e.g. ``only='PRIMARY'``. Can either be
            a string or list of hdu names. Default is to update all the hdus.
        extra_fits : boolean
            Update from ``extra_fits``.  Default is False.
        """
        def hdu_keywords_from_data(d, path, hdu_keywords):
            # Walk tree and add paths to keywords to hdu keywords
            if isinstance(d, dict):
                for key, val in d.items():
                    if len(path) > 0 or key != 'extra_fits':
                        hdu_keywords_from_data(val, path + [key], hdu_keywords)
            elif isinstance(d, list):
                for key, val in enumerate(d):
                    hdu_keywords_from_data(val, path + [key], hdu_keywords)
            elif isinstance(d, np.ndarray):
                # skip data arrays
                pass
            else:
                hdu_keywords.append(path)

        def hdu_keywords_from_schema(subschema, path, combiner, ctx, recurse):
            # Add path to keyword to hdu_keywords if in list of hdu names
            if 'fits_keyword' in subschema:
                fits_hdu = subschema.get('fits_hdu', 'PRIMARY')
                if fits_hdu in hdu_names:
                    ctx.append(path)

        def hdu_names_from_schema(subschema, path, combiner, ctx, recurse):
            # Build a set of hdu names from the schema
            hdu_name = subschema.get('fits_hdu')
            if hdu_name:
                hdu_names.add(hdu_name)

        def included(cursor, part):
            # Test if part is in the cursor
            if isinstance(part, int):
                return part >= 0 and part < len(cursor)
            else:
                return part in cursor

        def set_hdu_keyword(this_cursor, that_cursor, path):
            # Copy an element pointed to by path from that to this
            part = path.pop(0)
            if not included(that_cursor, part):
                return
            if len(path) == 0:
                this_cursor[part] = copy.deepcopy(that_cursor[part])
            else:
                that_cursor = that_cursor[part]
                if not included(this_cursor, part):
                    if isinstance(path[0], int):
                        if isinstance(part, int):
                            this_cursor.append([])
                        else:
                            this_cursor[part] = []
                    else:
                        if isinstance(part, int):
                            this_cursor.append({})
                        elif isinstance(that_cursor, list):
                            this_cursor[part] = []
                        else:
                            this_cursor[part] = {}
                this_cursor = this_cursor[part]
                set_hdu_keyword(this_cursor, that_cursor, path)

        def protected_keyword(path):
            # Some keywords are protected and
            # should not be copied from the other image
            if len(path) == 2:
                if path[0] == 'meta':
                    if path[1] in ('date', 'model_type'):
                        return True
            return False

        # Get the list of hdu names from the model so that updates
        # are limited to those hdus

        if only is not None:
            if isinstance(only, str):
                hdu_names = set([only])
            else:
                hdu_names = set(list(only))
        else:
            hdu_names = set(['PRIMARY'])
            mschema.walk_schema(self._schema, hdu_names_from_schema, hdu_names)

        # Get the paths to all the keywords that will be updated from

        hdu_keywords = []
        if isinstance(d, DataModel):
            schema = d._schema
            d = d._instance
            mschema.walk_schema(schema, hdu_keywords_from_schema, hdu_keywords)
        else:
            path = []
            hdu_keywords_from_data(d, path, hdu_keywords)

        # Perform the updates to the keywords mentioned in the schema
        for path in hdu_keywords:
            if not protected_keyword(path):
                set_hdu_keyword(self._instance, d, path)

        # Update from extra_fits as well, if indicated
        if extra_fits:
            for hdu_name in hdu_names:
                path = ['extra_fits', hdu_name, 'header']
                set_hdu_keyword(self._instance, d, path)

        self.validate()

    def to_flat_dict(self, include_arrays=True):
        """
        Returns a dictionary of all of the schema items as a flat dictionary.

        Each dictionary key is a dot-separated name.  For example, the
        schema element `meta.observation.date` will end up in the
        dictionary as::

            { "meta.observation.date": "2012-04-22T03:22:05.432" }

        """
        def convert_val(val):
            if isinstance(val, datetime.datetime):
                return val.isoformat()
            elif isinstance(val, Time):
                return str(val)
            return val

        if include_arrays:
            return dict(
                (key, convert_val(val)) for (key, val) in self.iteritems())
        else:
            return dict((key, convert_val(val))
                        for (key, val) in self.iteritems()
                        if not isinstance(val, np.ndarray))

    @property
    def schema(self):
        return self._schema

    def get_fileext(self):
        return 'fits'

    # TODO: This is just here for backward compatibility
    @property
    def _extra_fits(self):
        return self.extra_fits

    # TODO: For backward compatibility
    def get_section(self, name):
        return getattr(self, name)

    @property
    def history(self):
        """
        Get the history as a list of entries
        """
        return HistoryList(self._asdf)

    @history.setter
    def history(self, values):
        """
        Set a history entry.

        Parameters
        ----------
        values : list
            For FITS files this should be a list of strings.
            For ASDF files use a list of ``HistoryEntry`` object. It can be created
            with `~jwst.datamodels.util.create_history_entry`.

        """
        entries = self.history
        entries.clear()
        entries.extend(values)

    def get_fits_wcs(self, hdu_name='SCI', hdu_ver=1, key=' '):
        """
        Get a `astropy.wcs.WCS` object created from the FITS WCS
        information in the model.

        Note that modifying the returned WCS object will not modify
        the data in this model.  To update the model, use `set_fits_wcs`.

        Parameters
        ----------
        hdu_name : str, optional
            The name of the HDU to get the WCS from.  This must use
            named HDUs, not numerical order HDUs. To get the primary
            HDU, pass ``'PRIMARY'``.

        key : str, optional
            The name of a particular WCS transform to use.  This may
            be either ``' '`` or ``'A'``-``'Z'`` and corresponds to
            the ``"a"`` part of the ``CTYPEia`` cards.  *key* may only
            be provided if *header* is also provided.

        hdu_ver: int, optional
            The extension version. Used when there is more than one
            extension with the same name. The default value, 1,
            is the first.

        Returns
        -------
        wcs : `astropy.wcs.WCS` or `pywcs.WCS` object
            The type will depend on what libraries are installed on
            this system.
        """
        ff = fits_support.to_fits(self._instance, self._schema)
        hdu = fits_support.get_hdu(ff._hdulist, hdu_name, index=hdu_ver - 1)
        header = hdu.header
        return WCS(header, key=key, relax=True, fix=True)

    def set_fits_wcs(self, wcs, hdu_name='SCI'):
        """
        Sets the FITS WCS information on the model using the given
        `astropy.wcs.WCS` object.

        Note that the "key" of the WCS is stored in the WCS object
        itself, so it cannot be set as a parameter to this method.

        Parameters
        ----------
        wcs : `astropy.wcs.WCS` or `pywcs.WCS` object
            The object containing FITS WCS information

        hdu_name : str, optional
            The name of the HDU to set the WCS from.  This must use
            named HDUs, not numerical order HDUs.  To set the primary
            HDU, pass ``'PRIMARY'``.
        """
        header = wcs.to_header()
        if hdu_name == 'PRIMARY':
            hdu = fits.PrimaryHDU(header=header)
        else:
            hdu = fits.ImageHDU(name=hdu_name, header=header)
        hdulist = fits.HDUList([hdu])

        ff = fits_support.from_fits(
            hdulist,
            self._schema,
            self._ctx,
            ignore_missing_extensions=self._ignore_missing_extensions)

        self._instance = properties.merge_tree(self._instance, ff.tree)

    # --------------------------------------------------------
    # These two method aliases are here for astropy.registry
    # compatibility and should not be called directly
    # --------------------------------------------------------

    read = __init__

    def write(self, path, *args, **kwargs):
        self.save(path, *args, **kwargs)

    def getarray_noinit(self, attribute):
        """Retrieve array but without initilization

        Arrays initialize when directly referenced if they had
        not previously been initialized. This circumvents the
        initialization and instead raises `AttributeError`.

        Parameters
        ----------
        attribute : str
            The attribute to retrieve.

        Returns
        -------
        value : object
           The value of the attribute.

        Raises
        ------
        AttributeError
            If the attribute does not exist.
        """
        if attribute in self.instance:
            return getattr(self, attribute)
        raise AttributeError(f'{self} has no attribute "{attribute}"')
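A hedged usage sketch of the DataModel lifecycle defined above, assuming the jwst package is installed and that `ImageModel` (a concrete subclass) defines a primary "data" array in its schema:

from jwst.datamodels import ImageModel

with ImageModel((16, 16)) as model:        # shape tuple -> empty primary array
    model["meta.telescope"] = "JWST"       # dotted-name __setitem__
    assert model["meta.telescope"] == "JWST"
    out_path = model.save("example.asdf")  # the extension selects to_asdf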
Code example #47
File: niriss.py Project: stscicrawford/test_jwst_rtd
def niriss_soss(input_model, reference_files):
    """
    The NIRISS SOSS WCS pipeline.

    It includes two coordinate frames -
    "detector" and "world".

    It uses the "specwcs" reference file.
    """

    # Get the target RA and DEC, they will be used for setting the WCS RA
    # and DEC based on a conversation with Kevin Volk.
    try:
        target_ra = float(input_model['meta.target.ra'])
        target_dec = float(input_model['meta.target.dec'])
    except Exception:
        # There was an error getting the target RA and DEC, so we are not going to continue.
        raise ValueError(
            'Problem getting the TARG_RA or TARG_DEC from input model {}'.
            format(input_model))

    # Define the frames
    detector = cf.Frame2D(name='detector',
                          axes_order=(0, 1),
                          unit=(u.pix, u.pix))
    spec = cf.SpectralFrame(name='spectral',
                            axes_order=(2, ),
                            unit=(u.micron, ),
                            axes_names=('wavelength', ))
    sky = cf.CelestialFrame(reference_frame=coord.ICRS(),
                            axes_names=('ra', 'dec'),
                            axes_order=(0, 1),
                            unit=(u.deg, u.deg),
                            name='sky')
    world = cf.CompositeFrame([sky, spec], name='world')

    try:
        with AsdfFile.open(reference_files['specwcs']) as wl:
            wl1 = wl.tree[1].copy()
            wl2 = wl.tree[2].copy()
            wl3 = wl.tree[3].copy()
    except Exception as e:
        raise IOError('Error reading wavelength correction from {}'.format(
            reference_files['specwcs'])) from e

    try:
        velosys = input_model.meta.wcsinfo.velosys
    except AttributeError:
        pass
    else:
        if velosys is not None:
            velocity_corr = velocity_correction(
                input_model.meta.wcsinfo.velosys)
            wl1 = wl1 | velocity_corr
            wl2 = wl2 | velocity_corr
            wl3 = wl3 | velocity_corr
            log.info("Applied Barycentric velocity correction: {}".format(
                velocity_corr[1].amplitude.value))

    subarray2full = subarray_transform(input_model)

    # Reverse the order of inputs passed to Tabular because it's in python order in modeling.
    # Consider changing it in modeling?
    cm_order1 = subarray2full | (Mapping((0, 1, 1, 0)) | \
                                 (Const1D(target_ra) & Const1D(target_dec) & wl1)
                                 ).rename('Order1')
    cm_order2 = subarray2full | (Mapping((0, 1, 1, 0)) | \
                                 (Const1D(target_ra) & Const1D(target_dec) & wl2)
                                 ).rename('Order2')
    cm_order3 = subarray2full | (Mapping((0, 1, 1, 0)) | \
                                 (Const1D(target_ra) & Const1D(target_dec) & wl3)
                                 ).rename('Order3')

    # Define the transforms, they should accept (x,y) and return (ra, dec, lambda)
    soss_model = NirissSOSSModel(
        [1, 2, 3],
        [cm_order1, cm_order2, cm_order3]).rename('3-order SOSS Model')

    # Define the pipeline based on the frames and models above.
    pipeline = [(detector, soss_model), (world, None)]

    return pipeline
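The returned list of (frame, transform) pairs follows the gwcs pipeline convention, with `None` as the transform of the final frame. A self-contained sketch of the same convention with a trivial transform (all values illustrative):

import astropy.units as u
from astropy.modeling.models import Scale, Shift
from gwcs import coordinate_frames as cf
from gwcs.wcs import WCS

detector = cf.Frame2D(name='detector', axes_order=(0, 1), unit=(u.pix, u.pix))
world = cf.Frame2D(name='world', axes_order=(0, 1), unit=(u.deg, u.deg))
pipeline = [(detector, Shift(1) & Scale(2)), (world, None)]
wcs = WCS(pipeline)
print(wcs(1.0, 1.0))  # (2.0, 2.0)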
Code example #48
    def __init__(self,
                 init=None,
                 schema=None,
                 extensions=None,
                 pass_invalid_values=False):
        """
        Parameters
        ----------
        init : shape tuple, file path, file object, astropy.io.fits.HDUList, numpy array, None

            - None: A default data model with no shape

            - shape tuple: Initialize with empty data of the given
              shape

            - file path: Initialize from the given file (FITS or ASDF)

            - readable file object: Initialize from the given file
              object

            - ``astropy.io.fits.HDUList``: Initialize from the given
              `~astropy.io.fits.HDUList`.

            - A numpy array: Used to initialize the data array

            - dict: The object model tree for the data model

        schema : tree of objects representing a JSON schema, or string naming a schema, optional
            The schema to use to understand the elements on the model.
            If not provided, the schema associated with this class
            will be used.

        extensions: classes extending the standard set of extensions, optional.
            If an extension is defined, the prefix used should be 'url'.

        pass_invalid_values: If True, values that do not validate the schema can
            be read and written and only a warning will be generated
        """
        # Set the extensions
        if extensions is None:
            extensions = jwst_extensions[:]
        else:
            extensions.extend(jwst_extensions)
        self._extensions = extensions

        # Override value of pass_invalid value if environment value set
        if "PASS_INVALID_VALUES" in os.environ:
            pass_invalid_values = os.environ["PASS_INVALID_VALUES"]
            try:
                pass_invalid_values = bool(int(pass_invalid_values))
            except ValueError:
                pass_invalid_values = False

        self._pass_invalid_values = pass_invalid_values

        # Construct the path to the schema files
        filename = os.path.abspath(inspect.getfile(self.__class__))
        base_url = os.path.join(os.path.dirname(filename), 'schemas', '')

        # Load the schema files
        if schema is None:
            schema_path = os.path.join(base_url, self.schema_url)
            extension_list = asdf_extension.AsdfExtensionList(self._extensions)
            schema = asdf_schema.load_schema(
                schema_path,
                resolver=extension_list.url_mapping,
                resolve_references=True)

        self._schema = mschema.flatten_combiners(schema)
        # Determine what kind of input we have (init) and execute the
        # proper code to initialize the model
        self._files_to_close = []
        self._iscopy = False

        is_array = False
        is_shape = False
        shape = None

        if init is None:
            asdf = AsdfFile(extensions=extensions)
        elif isinstance(init, dict):
            asdf = AsdfFile(init, extensions=extensions)
        elif isinstance(init, np.ndarray):
            asdf = AsdfFile(extensions=extensions)
            shape = init.shape
            is_array = True
        elif isinstance(init, self.__class__):
            self.clone(self, init)
            return
        elif isinstance(init, DataModel):
            raise TypeError(
                "Passed in {0!r} is not of the expected subclass {1!r}".format(
                    init.__class__.__name__, self.__class__.__name__))
        elif isinstance(init, AsdfFile):
            asdf = init
        elif isinstance(init, tuple):
            for item in init:
                if not isinstance(item, int):
                    raise ValueError("shape must be a tuple of ints")
            shape = init
            asdf = AsdfFile()
            is_shape = True
        elif isinstance(init, fits.HDUList):
            asdf = fits_support.from_fits(init, self._schema, extensions,
                                          pass_invalid_values)

        elif isinstance(init, (six.string_types, bytes)):
            if isinstance(init, bytes):
                init = init.decode(sys.getfilesystemencoding())
            file_type = filetype.check(init)

            if file_type == "fits":
                hdulist = fits.open(init)
                asdf = fits_support.from_fits(hdulist, self._schema,
                                              extensions, pass_invalid_values)
                self._files_to_close.append(hdulist)

            elif file_type == "asdf":
                asdf = AsdfFile.open(init, extensions=extensions)

            else:
                # TODO handle json files as well
                raise IOError(
                    "File does not appear to be a FITS or ASDF file.")

        else:
            raise ValueError("Can't initialize datamodel using {0}".format(
                str(type(init))))

        # Initialize object fields as determined from the code above

        self._shape = shape
        self._instance = asdf.tree
        self._asdf = asdf
        self._ctx = self

        # if the input is from a file, set the filename attribute
        if isinstance(init, six.string_types):
            self.meta.filename = os.path.basename(init)
        elif isinstance(init, fits.HDUList):
            info = init.fileinfo(0)
            if info is not None:
                filename = info.get('filename')
                if filename is not None:
                    self.meta.filename = os.path.basename(filename)

        # if the input model doesn't have a date set, use the current date/time
        if self.meta.date is None:
            self.meta.date = Time(datetime.datetime.now())
            if hasattr(self.meta.date, 'value'):
                self.meta.date.format = 'isot'
                self.meta.date = str(self.meta.date.value)

        # store the data model type, if not already set
        if hasattr(self.meta, 'model_type'):
            if self.meta.model_type is None:
                self.meta.model_type = self.__class__.__name__
        else:
            self.meta.model_type = None

        if is_array:
            primary_array_name = self.get_primary_array_name()
            if primary_array_name is None:
                raise TypeError(
                    "Array passed to DataModel.__init__, but model has "
                    "no primary array in its schema")
            setattr(self, primary_array_name, init)

        # TODO this code looks useless
        if is_shape:
            getattr(self, self.get_primary_array_name())
Code example #49
File: model_base.py Project: hbushouse/jwst
    def __init__(self,
                 init=None,
                 schema=None,
                 extensions=None,
                 pass_invalid_values=False):
        """
        Parameters
        ----------
        init : shape tuple, file path, file object, astropy.io.fits.HDUList, numpy array, None

            - None: A default data model with no shape

            - shape tuple: Initialize with empty data of the given
              shape

            - file path: Initialize from the given file (FITS or ASDF)

            - readable file object: Initialize from the given file
              object

            - ``astropy.io.fits.HDUList``: Initialize from the given
              `~astropy.io.fits.HDUList`.

            - A numpy array: Used to initialize the data array

            - dict: The object model tree for the data model

        schema : tree of objects representing a JSON schema, or string naming a schema, optional
            The schema to use to understand the elements on the model.
            If not provided, the schema associated with this class
            will be used.
            
        extensions: classes extending the standard set of extensions
        
        pass_invalid_values: If True, values that do not validate the schema can
            be read and written, but with a warning message
        """
        filename = os.path.abspath(inspect.getfile(self.__class__))
        base_url = os.path.join(os.path.dirname(filename), 'schemas', '')

        if schema is None:
            schema_path = os.path.join(base_url, self.schema_url)
            schema = asdf_schema.load_schema(schema_path,
                                             resolve_references=True)

        self._schema = mschema.flatten_combiners(schema)

        if extensions is not None:
            extensions.extend(jwst_extensions)
        else:
            extensions = jwst_extensions[:]
        self._extensions = extensions

        if "PASS_INVALID_VALUES" in os.environ:
            pass_invalid_values = os.environ["PASS_INVALID_VALUES"]
            try:
                self._pass_invalid_values = bool(int(pass_invalid_values))
            except ValueError:
                self._pass_invalid_values = False
        else:
            self._pass_invalid_values = pass_invalid_values

        self._files_to_close = []
        is_array = False
        is_shape = False
        shape = None
        if init is None:
            asdf = AsdfFile(extensions=extensions)
        elif isinstance(init, dict):
            asdf = AsdfFile(init, extensions=extensions)
        elif isinstance(init, np.ndarray):
            asdf = AsdfFile(extensions=extensions)
            shape = init.shape
            is_array = True
        elif isinstance(init, self.__class__):
            instance = copy.deepcopy(init._instance)
            self._schema = init._schema
            self._shape = init._shape
            self._asdf = AsdfFile(instance, extensions=self._extensions)
            self._instance = instance
            self._ctx = self
            self.__class__ = init.__class__
            return
        elif isinstance(init, DataModel):
            raise TypeError(
                "Passed in {0!r} is not of the expected subclass {1!r}".format(
                    init.__class__.__name__, self.__class__.__name__))
        elif isinstance(init, AsdfFile):
            asdf = init
        elif isinstance(init, tuple):
            for item in init:
                if not isinstance(item, int):
                    raise ValueError("shape must be a tuple of ints")
            shape = init
            asdf = AsdfFile()
            is_shape = True
        elif isinstance(init, fits.HDUList):
            asdf = fits_support.from_fits(
                init,
                self._schema,
                extensions=self._extensions,
                validate=False,
                pass_invalid_values=self._pass_invalid_values)
        elif isinstance(init, six.string_types):
            if isinstance(init, bytes):
                init = init.decode(sys.getfilesystemencoding())
            try:
                hdulist = fits.open(init)
            except IOError:
                try:
                    asdf = AsdfFile.open(init, extensions=self._extensions)
                    # TODO: Add json support
                except ValueError:
                    raise IOError(
                        "File does not appear to be a FITS or ASDF file.")
            else:
                asdf = fits_support.from_fits(
                    hdulist,
                    self._schema,
                    extensions=self._extensions,
                    validate=False,
                    pass_invalid_values=self._pass_invalid_values)
                self._files_to_close.append(hdulist)

        self._shape = shape
        self._instance = asdf.tree
        self._asdf = asdf
        self._ctx = self

        # if the input model doesn't have a date set, use the current date/time
        if self.meta.date is None:
            self.meta.date = Time(datetime.datetime.now())
            self.meta.date.format = 'isot'
            self.meta.date = self.meta.date.value

        # if the input is from a file, set the filename attribute
        if isinstance(init, six.string_types):
            self.meta.filename = os.path.basename(init)

        if is_array:
            primary_array_name = self.get_primary_array_name()
            if primary_array_name is None:
                raise TypeError(
                    "Array passed to DataModel.__init__, but model has "
                    "no primary array in its schema")
            setattr(self, primary_array_name, init)

        if is_shape:
            getattr(self, self.get_primary_array_name())
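
The accepted init types map to distinct construction paths; a hypothetical usage sketch, assuming a concrete ImageModel subclass and a placeholder file name:

import numpy as np
from jwst.datamodels import ImageModel  # assumed concrete subclass

m1 = ImageModel()                      # default model with no shape
m2 = ImageModel((2048, 2048))          # empty data of the given shape
m3 = ImageModel(np.zeros((10, 10)))    # initialize from a numpy array
m4 = ImageModel("jw00001_cal.fits")    # from a FITS or ASDF file (placeholder name)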
Code example #50
0
    def __init__(self,
                 init=None,
                 schema=None,
                 extensions=None,
                 pass_invalid_values=False,
                 strict_validation=False):
        """
        Parameters
        ----------
        init : shape tuple, file path, file object, astropy.io.fits.HDUList, numpy array, None

            - None: A default data model with no shape

            - shape tuple: Initialize with empty data of the given
              shape

            - file path: Initialize from the given file (FITS or ASDF)

            - readable file object: Initialize from the given file
              object

            - ``astropy.io.fits.HDUList``: Initialize from the given
              `~astropy.io.fits.HDUList`.

            - A numpy array: Used to initialize the data array

            - dict: The object model tree for the data model

        schema : tree of objects representing a JSON schema, or string naming a schema, optional
            The schema to use to understand the elements on the model.
            If not provided, the schema associated with this class
            will be used.

        extensions : list, optional
            Classes extending the standard set of extensions.
            If an extension is defined, the prefix used should be 'url'.

        pass_invalid_values : bool, optional
            If True, values that do not validate against the schema
            will be added to the metadata. If False, they will be set to None.

        strict_validation : bool, optional
            If True, schema validation errors will generate an exception.
            If False, they will generate a warning.
        """
        # Set the extensions
        self._extensions = extensions

        # Override value of validation parameters
        # if environment value set
        self._pass_invalid_values = self.get_envar("PASS_INVALID_VALUES",
                                                   pass_invalid_values)
        self._strict_validation = self.get_envar("STRICT_VALIDATION",
                                                 strict_validation)

        # Construct the path to the schema files
        filename = os.path.abspath(inspect.getfile(self.__class__))
        base_url = os.path.join(os.path.dirname(filename), 'schemas', '')

        # Load the schema files
        if schema is None:
            schema_path = os.path.join(base_url, self.schema_url)
            # Create an AsdfFile so we can use its resolver for loading schemas
            asdf_file = AsdfFile(extensions=self._extensions)
            if hasattr(asdf_file, 'resolver'):
                file_resolver = asdf_file.resolver
            else:
                file_resolver = self.get_resolver(asdf_file)
            schema = asdf_schema.load_schema(schema_path,
                                             resolver=file_resolver,
                                             resolve_references=True)

        self._schema = mschema.flatten_combiners(schema)

        # Provide the object as context to other classes and functions
        self._ctx = self

        # Determine what kind of input we have (init) and execute the
        # proper code to initialize the model
        self._files_to_close = []
        self._iscopy = False
        is_array = False
        is_shape = False
        shape = None

        if init is None:
            asdf = AsdfFile(extensions=extensions)

        elif isinstance(init, dict):
            asdf = AsdfFile(init, extensions=self._extensions)

        elif isinstance(init, np.ndarray):
            asdf = AsdfFile(extensions=self._extensions)
            shape = init.shape
            is_array = True

        elif isinstance(init, tuple):
            for item in init:
                if not isinstance(item, int):
                    raise ValueError("shape must be a tuple of ints")

            shape = init
            asdf = AsdfFile()
            is_shape = True

        elif isinstance(init, DataModel):
            self.clone(self, init)
            if not isinstance(init, self.__class__):
                self.validate()
            return

        elif isinstance(init, AsdfFile):
            asdf = init

        elif isinstance(init, fits.HDUList):
            asdf = fits_support.from_fits(init, self._schema, self._extensions,
                                          self._ctx)

        elif isinstance(init, (str, bytes)):
            if isinstance(init, bytes):
                init = init.decode(sys.getfilesystemencoding())
            file_type = filetype.check(init)

            if file_type == "fits":
                hdulist = fits.open(init)
                asdf = fits_support.from_fits(hdulist, self._schema,
                                              self._extensions, self._ctx)

                self._files_to_close.append(hdulist)

            elif file_type == "asdf":
                asdf = AsdfFile.open(init, extensions=self._extensions)

            else:
                # TODO handle json files as well
                raise IOError(
                    "File does not appear to be a FITS or ASDF file.")

        else:
            raise ValueError("Can't initialize datamodel using {0}".format(
                str(type(init))))

        # Initialize object fields as determined from the code above
        self._shape = shape
        self._instance = asdf.tree
        self._asdf = asdf

        # Initialize class dependent hidden fields
        self._no_asdf_extension = False

        # Instantiate the primary array of the image
        if is_array:
            primary_array_name = self.get_primary_array_name()
            if not primary_array_name:
                raise TypeError(
                    "Array passed to DataModel.__init__, but model has "
                    "no primary array in its schema")
            setattr(self, primary_array_name, init)

        if is_shape:
            if not self.get_primary_array_name():
                raise TypeError(
                    "Shape passed to DataModel.__init__, but model has "
                    "no primary array in its schema")

        # if the input is from a file, set the filename attribute
        if isinstance(init, str):
            self.meta.filename = os.path.basename(init)
        elif isinstance(init, fits.HDUList):
            info = init.fileinfo(0)
            if info is not None:
                filename = info.get('filename')
                if filename is not None:
                    self.meta.filename = os.path.basename(filename)

        # if the input model doesn't have a date set, use the current date/time
        if not self.meta.hasattr('date'):
            current_date = Time(datetime.datetime.now())
            current_date.format = 'isot'
            self.meta.date = current_date.value

        # store the data model type, if not already set
        klass = self.__class__.__name__
        if klass != 'DataModel':
            if not self.meta.hasattr('model_type'):
                self.meta.model_type = klass
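
The get_envar helper referenced above is not part of this excerpt; a plausible sketch of its logic, written here as a free function (the original is a method) and mirroring the inline environment handling of the previous example:

import os

def get_envar(name, default):
    # Interpret the variable as an integer-encoded boolean, falling
    # back to False when the value cannot be parsed.
    if name in os.environ:
        try:
            return bool(int(os.environ[name]))
        except ValueError:
            return False
    return default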
Code example #51
0
import asdf
from asdf import AsdfFile


def test_custom_and_analytical(model):
    fa = AsdfFile()
    fa.tree['model'] = model
    fa.write_to('custom_and_analytical_inverse.asdf')
    with asdf.open('custom_and_analytical_inverse.asdf') as f:
        assert f.tree['model'].inverse is not None
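
The model fixture must carry an explicitly assigned inverse for the assertion to hold; a minimal sketch of such a fixture, assuming astropy's modeling framework:

import pytest
from astropy.modeling import models

@pytest.fixture
def model():
    # Hypothetical fixture: a compound model with a hand-assigned
    # analytical inverse of Shift(1) | Scale(2).
    m = models.Shift(1) | models.Scale(2)
    m.inverse = models.Scale(1 / 2) | models.Shift(-1)
    return m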
Code example #52
0
File: parse.py Project: m09/mloncode
def parse(*, raw_dir: str, uasts_dir: str, configs_dir: str,
          log_level: str) -> None:
    """Parse a CodRep 2019 dataset into UASTs."""
    Config.from_arguments(locals(), ["raw_dir", "uasts_dir"],
                          "configs_dir").save(
                              Path(configs_dir) / "parse.json")
    logger = setup_logging(__name__, log_level)
    raw_dir_path = Path(raw_dir).expanduser().resolve()
    uasts_dir_path = Path(uasts_dir).expanduser().resolve()
    uasts_dir_path.mkdir(parents=True, exist_ok=True)

    parser = JavaParser(split_formatting=True)
    logger.info("Parsing %s", raw_dir_path)
    labels_file = raw_dir_path / "out.txt"
    extract_labels = labels_file.is_file()
    if extract_labels:
        error_offsets = {}
        for i, line in enumerate(labels_file.open("r", encoding="utf8")):
            error_offsets["%d.txt" % i] = int(line) - 1
    for file_path in raw_dir_path.rglob("*.txt"):
        if file_path.samefile(labels_file):
            continue
        file_path_relative = file_path.relative_to(raw_dir_path)
        start = time()
        logger.debug("Parsing %s", file_path_relative)
        try:
            nodes = parser.parse(raw_dir_path, file_path_relative)
        except ParsingException:
            continue
        logger.debug(
            "Parsed  %s into %d nodes in %.2fms",
            file_path_relative,
            len(nodes.nodes),
            (time() - start) * 1000,
        )
        error_node_index = None
        if extract_labels:
            error_offset = error_offsets[file_path.name]
            for formatting_i, i in enumerate(nodes.formatting_indexes):
                node = nodes.nodes[i]
                if node.start == error_offset:
                    error_node_index = formatting_i
                    break
            else:
                logger.warning(
                    "Could not retrieve a formatting node for the error at offset %d "
                    "of file %s.",
                    error_offset,
                    file_path.with_suffix("").name,
                )
                continue
        codrep_label = CodRepLabel(
            error_index=error_node_index,
            n_formatting_nodes=len(nodes.formatting_indexes),
        )
        output_subdirectory = uasts_dir_path / file_path_relative.parent
        output_subdirectory.mkdir(parents=True, exist_ok=True)
        with (output_subdirectory /
              file_path.with_suffix(".asdf").name).open("wb") as fh:
            af = AsdfFile(
                dict(
                    nodes=nodes.to_tree(file_path.read_text(encoding="utf-8")),
                    codrep_label=codrep_label.to_tree(),
                    filepath=str(file_path_relative),
                ))
            af.write_to(fh, all_array_compression="bzp2")
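
A hypothetical invocation; the directory paths below are placeholders:

parse(raw_dir="~/datasets/codrep2019/raw",
      uasts_dir="~/datasets/codrep2019/uasts",
      configs_dir="~/datasets/codrep2019/configs",
      log_level="INFO")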
Code example #53
0
def _check_type(validator, types, instance, schema):
    """
    Callback to check data type. Skips over null values.
    """
    if instance is None:
        errors = []
    else:
        errors = asdf_schema.validate_type(validator, types,
                                           instance, schema)
    return errors


validator_callbacks = HashableDict(asdf_schema.YAML_VALIDATORS)
validator_callbacks.update({'type': _check_type})

validator_context = AsdfFile()
validator_resolver = validator_context.resolver


def _check_value(value, schema):
    """
    Perform the actual validation.
    """
    if value is None:
        if schema.get('fits_required'):
            name = schema.get("fits_keyword") or schema.get("fits_hdu")
            raise jsonschema.ValidationError("%s is a required value"
                                              % name)
    else:
        temp_schema = {
            '$schema':
Code example #54
0
File: example.py Project: Guillemdb/asdf
    def run(self):
        filename = self.arguments[0]

        cwd = os.getcwd()
        os.chdir(TMPDIR)

        parts = []
        try:
            ff = AsdfFile()
            code = AsdfFile._open_impl(ff, filename, _get_yaml_content=True)
            code = '{0} {1}\n'.format(
                ASDF_MAGIC, version_string) + code.strip().decode('utf-8')
            literal = nodes.literal_block(code, code)
            literal['language'] = 'yaml'
            set_source_info(self, literal)
            parts.append(literal)

            with AsdfFile.open(filename) as ff:
                for i, block in enumerate(ff.blocks.internal_blocks):
                    data = codecs.encode(block.data.tobytes(), 'hex')
                    if len(data) > 40:
                        data = data[:40] + '...'.encode()
                    allocated = block._allocated
                    size = block._size
                    data_size = block._data_size
                    flags = block._flags

                    if flags & BLOCK_FLAG_STREAMED:
                        allocated = size = data_size = 0

                    lines = []
                    lines.append('BLOCK {0}:'.format(i))

                    human_flags = []
                    for key, val in FLAGS.items():
                        if flags & key:
                            human_flags.append(val)
                    if len(human_flags):
                        lines.append('    flags: {0}'.format(
                            ' | '.join(human_flags)))
                    if block.input_compression:
                        lines.append('    compression: {0}'.format(
                            block.input_compression))
                    lines.append('    allocated_size: {0}'.format(allocated))
                    lines.append('    used_size: {0}'.format(size))
                    lines.append('    data_size: {0}'.format(data_size))
                    lines.append('    data: {0}'.format(data))

                    code = '\n'.join(lines)

                    literal = nodes.literal_block(code, code)
                    literal['language'] = 'yaml'
                    set_source_info(self, literal)
                    parts.append(literal)

                internal_blocks = list(ff.blocks.internal_blocks)
                if (len(internal_blocks)
                        and internal_blocks[-1].array_storage != 'streamed'):
                    buff = io.BytesIO()
                    ff.blocks.write_block_index(buff, ff)
                    block_index = buff.getvalue()
                    literal = nodes.literal_block(block_index, block_index)
                    literal['language'] = 'yaml'
                    set_source_info(self, literal)
                    parts.append(literal)

        finally:
            os.chdir(cwd)

        result = nodes.admonition()
        textnodes, messages = self.state.inline_text(filename, self.lineno)
        title = nodes.title(filename, '', *textnodes)
        result += title
        result += parts
        return [result]
Code example #55
0
def dist_coeff():
    """Create a distortion correction reference file.

    The polynomial coefficients are read from the two text files given
    on the command line (sys.argv); the order of the input files is
    det2sky first, then sky2det.
    """

    # Put coefficients into variables by reading in the text files.
    coeffxr, coeffyr = np.loadtxt(sys.argv[1],
                                  skiprows=6,
                                  usecols=(1, 2),
                                  unpack=True)
    print(coeffxr)
    print(coeffyr)

    coeffxi, coeffyi = np.loadtxt(sys.argv[2],
                                  skiprows=6,
                                  usecols=(1, 2),
                                  unpack=True)
    print(coeffxi)
    print(coeffyi)

    "Transform coefficients to get undistorted, real, sky x."
    x0_0r = coeffxr[0]
    print('Coefficient for x0_0: ', x0_0r)
    x1_0r = coeffxr[1]
    print('Coefficient for x1_0: ', x1_0r)
    x0_1r = coeffxr[2]
    print('Coefficient for x0_1: ', x0_1r)
    x2_0r = coeffxr[3]
    print('Coefficient for x2_0: ', x2_0r)
    x1_1r = coeffxr[4]
    print('Coefficient for x1_1: ', x1_1r)
    x0_2r = coeffxr[5]
    print('Coefficient for x0_2: ', x0_2r)
    x3_0r = coeffxr[6]
    print('Coefficient for x3_0: ', x3_0r)
    x2_1r = coeffxr[7]
    print('Coefficient for x2_1: ', x2_1r)
    x1_2r = coeffxr[8]
    print('Coefficient for x1_2: ', x1_2r)
    x0_3r = coeffxr[9]
    print('Coefficient for x0_3: ', x0_3r)
    x4_0r = coeffxr[10]
    print('Coefficient for x4_0: ', x4_0r)
    x3_1r = coeffxr[11]
    print('Coefficient for x3_1: ', x3_1r)
    x2_2r = coeffxr[12]
    print('Coefficient for x2_2: ', x2_2r)
    x1_3r = coeffxr[13]
    print('Coefficient for x1_3: ', x1_3r)
    x0_4r = coeffxr[14]
    print('Coefficient for x0_4: ', x0_4r)

    "Transform coefficients to get undistorted, real, sky y."
    y0_0r = coeffyr[0]
    print('Coefficient for y0_0: ', y0_0r)
    y1_0r = coeffyr[1]
    print('Coefficient for y1_0: ', y1_0r)
    y0_1r = coeffyr[2]
    print('Coefficient for y0_1: ', y0_1r)
    y2_0r = coeffyr[3]
    print('Coefficient for y2_0: ', y2_0r)
    y1_1r = coeffyr[4]
    print('Coefficient for y1_1: ', y1_1r)
    y0_2r = coeffyr[5]
    print('Coefficient for y0_2: ', y0_2r)
    y3_0r = coeffyr[6]
    print('Coefficient for y3_0: ', y3_0r)
    y2_1r = coeffyr[7]
    print('Coefficient for y2_1: ', y2_1r)
    y1_2r = coeffyr[8]
    print('Coefficient for y1_2: ', y1_2r)
    y0_3r = coeffyr[9]
    print('Coefficient for y0_3: ', y0_3r)
    y4_0r = coeffyr[10]
    print('Coefficient for y4_0: ', y4_0r)
    y3_1r = coeffyr[11]
    print('Coefficient for y3_1: ', y3_1r)
    y2_2r = coeffyr[12]
    print('Coefficient for y2_2: ', y2_2r)
    y1_3r = coeffyr[13]
    print('Coefficient for y1_3: ', y1_3r)
    y0_4r = coeffyr[14]
    print('Coefficient for y0_4: ', y0_4r)

    "Transform coefficients to get distorted, ideal, detector x."
    x0_0i = coeffxi[0]
    print('Coefficient for x0_0: ', x0_0i)
    x1_0i = coeffxi[1]
    print('Coefficient for x1_0: ', x1_0i)
    x0_1i = coeffxi[2]
    print('Coefficient for x0_1: ', x0_1i)
    x2_0i = coeffxi[3]
    print('Coefficient for x2_0: ', x2_0i)
    x1_1i = coeffxi[4]
    print('Coefficient for x1_1: ', x1_1i)
    x0_2i = coeffxi[5]
    print('Coefficient for x0_2: ', x0_2i)
    x3_0i = coeffxi[6]
    print('Coefficient for x3_0: ', x3_0i)
    x2_1i = coeffxi[7]
    print('Coefficient for x2_1: ', x2_1i)
    x1_2i = coeffxi[8]
    print('Coefficient for x1_2: ', x1_2i)
    x0_3i = coeffxi[9]
    print('Coefficient for x0_3: ', x0_3i)
    x4_0i = coeffxi[10]
    print('Coefficient for x4_0: ', x4_0i)
    x3_1i = coeffxi[11]
    print('Coefficient for x3_1: ', x3_1i)
    x2_2i = coeffxi[12]
    print('Coefficient for x2_2: ', x2_2i)
    x1_3i = coeffxi[13]
    print('Coefficient for x1_3: ', x1_3i)
    x0_4i = coeffxi[14]
    print('Coefficient for x0_4: ', x0_4i)

    "Transform coefficients to get distorted, ideal, detector y."
    y0_0i = coeffyi[0]
    print('Coefficient for y0_0: ', y0_0i)
    y1_0i = coeffyi[1]
    print('Coefficient for y1_0: ', y1_0i)
    y0_1i = coeffyi[2]
    print('Coefficient for y0_1: ', y0_1i)
    y2_0i = coeffyi[3]
    print('Coefficient for y2_0: ', y2_0i)
    y1_1i = coeffyi[4]
    print('Coefficient for y1_1: ', y1_1i)
    y0_2i = coeffyi[5]
    print('Coefficient for y0_2: ', y0_2i)
    y3_0i = coeffyi[6]
    print('Coefficient for y3_0: ', y3_0i)
    y2_1i = coeffyi[7]
    print('Coefficient for y2_1: ', y2_1i)
    y1_2i = coeffyi[8]
    print('Coefficient for y1_2: ', y1_2i)
    y0_3i = coeffyi[9]
    print('Coefficient for y0_3: ', y0_3i)
    y4_0i = coeffyi[10]
    print('Coefficient for y4_0: ', y4_0i)
    y3_1i = coeffyi[11]
    print('Coefficient for y3_1: ', y3_1i)
    y2_2i = coeffyi[12]
    print('Coefficient for y2_2: ', y2_2i)
    y1_3i = coeffyi[13]
    print('Coefficient for y1_3: ', y1_3i)
    y0_4i = coeffyi[14]
    print('Coefficient for y0_4: ', y0_4i)

    "Generate ideal or detector coordinates."
    x_pix = np.arange(0, 2048, 1)
    y_pix = np.arange(0, 2048, 1)

    x_det_orig = []
    y_det_orig = []

    for x, y in itertools.product(x_pix, y_pix):
        x_det_orig.append(x)
        y_det_orig.append(y)

    print('Done with first for loop!!!!')

    print(len(x_det_orig))
    print(len(y_det_orig))

    "To go from ideal, distorted to real, undistorted."

    x_sky = []
    y_sky = []

    #-------
    for x, y in zip(x_det_orig, y_det_orig):
        x_real, y_real = apply_coeff(x, y, coeffxr, coeffyr)
        x_sky.append(x_real)
        y_sky.append(y_real)
    #-------

    print(' ')
    print('Done with second for loop!!!!')

    print(len(x_sky))
    print(len(y_sky))
    x_sky = np.array(x_sky)
    y_sky = np.array(y_sky)
    print('Above is for transformation to sky pixels.')

    "To go from real, undistorted to ideal, distorted."

    x_det = []
    y_det = []

    for x, y in zip(x_sky, y_sky):
        x_ideal, y_ideal = apply_coeff(x, y, coeffxi, coeffyi)
        x_det.append(x_ideal)
        y_det.append(y_ideal)

    print(' ')
    print('Done with third for loop!!!!')

    print(len(x_det))
    print(len(y_det))
    print('Above is for transformation to detector pixels.')

    "Derive residuals from the complete transformation."
    x_det = np.array(x_det)
    y_det = np.array(y_det)
    x_det_orig = np.array(x_det_orig)
    y_det_orig = np.array(y_det_orig)

    resid_x = x_det - x_det_orig
    resid_y = y_det - y_det_orig
    print(' ')
    print('Residual in x: ', resid_x)
    print(np.min(resid_x), np.max(resid_x))
    print(' ')
    print('Residual in y: ', resid_y)
    print(np.min(resid_y), np.max(resid_y))

    # Open asdf reference file and transform from det to sky.
    f = AsdfFile.open('niriss_ref_distortion_image.asdf')
    det2sky_trans = f.tree['model']
    sky_x, sky_y = det2sky_trans(x_det_orig, y_det_orig)

    # Transform from sky to det.
    sky2det_trans = det2sky_trans.inverse
    inv_x, inv_y = sky2det_trans(sky_x, sky_y)

    # Calculate residuals from reference file.
    x_resid_ref = inv_x - x_det_orig
    y_resid_ref = inv_y - y_det_orig

    # Calculate residuals of residuals.
    x_resid_resid = np.absolute(resid_x - x_resid_ref)
    y_resid_resid = np.absolute(resid_y - y_resid_ref)

    print(' ')
    print(x_resid_resid)
    print('The minimum in x_resid_resid is: ', np.min(x_resid_resid))
    print('The maximum in x_resid_resid is: ', np.max(x_resid_resid))
    print('The mean in x_resid_resid is: ', np.mean(x_resid_resid))
    print('The standard deviation in x_resid_resid is: ',
          np.std(x_resid_resid))
    print('The median in x_resid_resid is: ', np.median(x_resid_resid))

    print(' ')
    print(y_resid_resid)
    print('The minimum in y_resid_resid is: ', np.min(y_resid_resid))
    print('The maximum in y_resid_resid is: ', np.max(y_resid_resid))
    print('The mean in y_resid_resid is: ', np.mean(y_resid_resid))
    print('The standard deviation in y_resid_resid is: ',
          np.std(y_resid_resid))
    print('The median in y_resid_resid is: ', np.median(y_resid_resid))

    print(' ')
    print('Done, Done, and Done. Git the bleep along little doggies.')
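
The apply_coeff helper called above is not included in the excerpt; a minimal sketch of what it plausibly does, assuming the i_j naming used for the coefficients (coefficient i_j multiplies x**i * y**j):

def apply_coeff(x, y, coeffx, coeffy):
    # Evaluate a 4th-order 2D polynomial; the term order follows the
    # coefficient labels printed above.
    terms = [1, x, y,
             x**2, x * y, y**2,
             x**3, x**2 * y, x * y**2, y**3,
             x**4, x**3 * y, x**2 * y**2, x * y**3, y**4]
    x_out = sum(c * t for c, t in zip(coeffx, terms))
    y_out = sum(c * t for c, t in zip(coeffy, terms))
    return x_out, y_out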
Code example #56
0
    def __init__(self,
                 init=None,
                 schema=None,
                 memmap=False,
                 pass_invalid_values=None,
                 strict_validation=None,
                 ignore_missing_extensions=True,
                 **kwargs):
        """
        Parameters
        ----------
        init : str, tuple, `~astropy.io.fits.HDUList`, ndarray, dict, None

            - None : Create a default data model with no shape.

            - tuple : Shape of the data array.
              Initialize with an empty data array of the given shape.

            - file path: Initialize from the given file (FITS or ASDF)

            - readable file object: Initialize from the given file
              object

            - `~astropy.io.fits.HDUList` : Initialize from the given
              `~astropy.io.fits.HDUList`.

            - A numpy array: Used to initialize the data array

            - dict: The object model tree for the data model

        schema : dict, str (optional)
            Tree of objects representing a JSON schema, or string naming a schema.
            The schema to use to understand the elements on the model.
            If not provided, the schema associated with this class
            will be used.

        memmap : bool
            Turn memmap of FITS file on or off.  (default: False).  Ignored for
            ASDF files.

        pass_invalid_values : bool or None
            If `True`, values that do not validate the schema
            will be added to the metadata. If `False`, they will be set to `None`.
            If `None`, value will be taken from the environmental PASS_INVALID_VALUES.
            Otherwise the default value is `False`.

        strict_validation : bool or None
            If `True`, schema validation errors will generate
            an exception. If `False`, they will generate a warning.
            If `None`, value will be taken from the environmental STRICT_VALIDATION.
            Otherwise, the default value is `False`.

        ignore_missing_extensions : bool
            When `False`, raise warnings when a file is read that
            contains metadata about extensions that are not available.
            Defaults to `True`.

        kwargs : dict
            Additional arguments passed to lower level functions.
        """

        # Override value of validation parameters if not explicitly set.
        if pass_invalid_values is None:
            pass_invalid_values = self._get_envar_as_boolean(
                "PASS_INVALID_VALUES", False)
        self._pass_invalid_values = pass_invalid_values
        if strict_validation is None:
            strict_validation = self._get_envar_as_boolean(
                "STRICT_VALIDATION", False)
        self._strict_validation = strict_validation
        self._ignore_missing_extensions = ignore_missing_extensions

        kwargs.update({'ignore_missing_extensions': ignore_missing_extensions})

        # Load the schema files
        if schema is None:
            # Create an AsdfFile so we can use its resolver for loading schemas
            asdf_file = AsdfFile()
            schema = asdf_schema.load_schema(self.schema_url,
                                             resolver=asdf_file.resolver,
                                             resolve_references=True)

        self._schema = mschema.merge_property_trees(schema)

        # Provide the object as context to other classes and functions
        self._ctx = self

        # Determine what kind of input we have (init) and execute the
        # proper code to initialize the model
        self._files_to_close = []
        self._iscopy = False
        is_array = False
        is_shape = False
        shape = None

        if init is None:
            asdffile = self.open_asdf(init=None, **kwargs)

        elif isinstance(init, dict):
            asdffile = self.open_asdf(init=init, **kwargs)

        elif isinstance(init, np.ndarray):
            asdffile = self.open_asdf(init=None, **kwargs)

            shape = init.shape
            is_array = True

        elif isinstance(init, tuple):
            for item in init:
                if not isinstance(item, int):
                    raise ValueError("shape must be a tuple of ints")

            shape = init
            is_shape = True
            asdffile = self.open_asdf(init=None, **kwargs)

        elif isinstance(init, DataModel):
            asdffile = None
            self.clone(self, init)
            if not isinstance(init, self.__class__):
                self.validate()
            return

        elif isinstance(init, AsdfFile):
            asdffile = init

        elif isinstance(init, fits.HDUList):
            asdffile = fits_support.from_fits(init, self._schema, self._ctx,
                                              **kwargs)

        elif isinstance(init, (str, bytes)):
            if isinstance(init, bytes):
                init = init.decode(sys.getfilesystemencoding())
            file_type = filetype.check(init)

            if file_type == "fits":
                if s3_utils.is_s3_uri(init):
                    init_fitsopen = s3_utils.get_object(init)
                    memmap = None
                else:
                    init_fitsopen = init

                with fits.open(init_fitsopen, memmap=memmap) as hdulist:
                    asdffile = fits_support.from_fits(hdulist, self._schema,
                                                      self._ctx, **kwargs)
                    self._files_to_close.append(hdulist)

            elif file_type == "asdf":
                asdffile = self.open_asdf(init=init, **kwargs)

            else:
                # TODO handle json files as well
                raise IOError(
                    "File does not appear to be a FITS or ASDF file.")

        else:
            raise ValueError("Can't initialize datamodel using {0}".format(
                str(type(init))))

        # Initialize object fields as determined from the code above
        self._shape = shape
        self._instance = asdffile.tree
        self._asdf = asdffile

        # Initialize class dependent hidden fields
        self._no_asdf_extension = False

        # Instantiate the primary array of the image
        if is_array:
            primary_array_name = self.get_primary_array_name()
            if not primary_array_name:
                raise TypeError(
                    "Array passed to DataModel.__init__, but model has "
                    "no primary array in its schema")
            setattr(self, primary_array_name, init)

        # If a shape has been given, initialize the primary array.
        if is_shape:
            primary_array_name = self.get_primary_array_name()
            if not primary_array_name:
                raise TypeError(
                    "Shape passed to DataModel.__init__, but model has "
                    "no primary array in its schema")

            # Initialization occurs when the primary array is first
            # referenced. Do so now.
            getattr(self, primary_array_name)

        # if the input is from a file, set the filename attribute
        if isinstance(init, str):
            self.meta.filename = os.path.basename(init)
        elif isinstance(init, fits.HDUList):
            info = init.fileinfo(0)
            if info is not None:
                filename = info.get('filename')
                if filename is not None:
                    self.meta.filename = os.path.basename(filename)

        # if the input model doesn't have a date set, use the current date/time
        if not self.meta.hasattr('date'):
            current_date = Time(datetime.datetime.now())
            current_date.format = 'isot'
            self.meta.date = current_date.value

        # store the data model type, if not already set
        klass = self.__class__.__name__
        if klass != 'DataModel':
            if not self.meta.hasattr('model_type'):
                self.meta.model_type = klass

        # initialize arrays from keyword arguments when they are present

        for attr, value in kwargs.items():
            if value is not None:
                subschema = properties._get_schema_for_property(
                    self._schema, attr)
                if 'datatype' in subschema:
                    setattr(self, attr, value)
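
A hypothetical use of the keyword-argument path at the end of __init__, assuming an ImageModel subclass whose schema declares a datatype for dq and that such keyword arguments survive the lower-level open call:

import numpy as np
from jwst.datamodels import ImageModel  # assumed concrete subclass

# dq is matched against the schema and assigned because its subschema
# carries a 'datatype' entry.
model = ImageModel((10, 10), dq=np.zeros((10, 10), dtype=np.uint32))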
Code example #57
0
import pytest

import numpy as np

import asdf
from asdf import AsdfFile
from asdf import asdftypes
from asdf import block
from asdf import schema
from asdf import extension
from asdf import treeutil
from asdf import util
from asdf import versioning
from . import helpers, CustomTestType

_ctx = AsdfFile()
_resolver = _ctx.resolver


class LabelMapperTestType(CustomTestType):
    version = '1.0.0'
    name = 'transform/label_mapper'


class RegionsSelectorTestType(CustomTestType):
    version = '1.0.0'
    name = 'transform/regions_selector'


class TestExtension(extension.BuiltinExtension):
    """This class defines an extension that represents tags whose
Code example #58
0
def create_miri_imager_distortion(distfile, outname):
    """
    Create an asdf reference file with all distortion components for the MIRI imager.
    The filter offsets are stored in a separate file.

    Note: The IDT supplied distortion file lists sky to pixel as the
    forward transform. Since "forward" in the JWST pipeline is from
    pixel to sky, the meaning of forward and inverse matrices and the order
    in which they are applied is switched.

    The order of operation from pixel to sky is:
    - Apply MI matrix
    - Apply AI and BI matrices
    - Apply the TI matrix (this gives V2/V3 coordinates)

    Parameters
    ----------
    distfile : str
        MIRI imager DISTORTION file provided by the IDT team.
    outname : str
        Name of reference file to be written to disk.

    Returns
    -------
    fasdf : AsdfFile
        AsdfFile object

    Examples
    --------
    >>> create_miri_imager_distortion("MIRI_FM_MIRIMAGE_DISTORTION_03.02.00.fits", 'test.asdf')
    """
    fdist = fits.open(distfile)
    mi_matrix = fdist[8].data
    mi_col = models.Polynomial1D(1,
                                 c0=mi_matrix[0, 2],
                                 c1=mi_matrix[0, 0],
                                 name="M_column_correction")
    mi_row = models.Polynomial1D(1,
                                 c0=mi_matrix[1, 2],
                                 c1=mi_matrix[1, 1],
                                 name="M_row_correction")
    m_matrix = fdist[4].data
    m_col = models.Polynomial1D(1, c0=m_matrix[0, 2], c1=m_matrix[0, 0])
    m_row = models.Polynomial1D(1, c0=m_matrix[1, 2], c1=m_matrix[1, 1])
    mi_col.inverse = m_col
    mi_row.inverse = m_row
    m_transform = mi_col & mi_row  #mi_row & mi_col

    ai_matrix = fdist[6].data
    a_matrix = fdist[2].data
    col_poly = polynomial_from_coeffs_matrix(ai_matrix, name="A_correction")
    col_poly.inverse = polynomial_from_coeffs_matrix(a_matrix)
    bi_matrix = fdist[5].data
    b_matrix = fdist[1].data
    row_poly = polynomial_from_coeffs_matrix(bi_matrix, name="B_correction")
    row_poly.inverse = polynomial_from_coeffs_matrix(b_matrix)
    poly = col_poly & row_poly

    ti_matrix = fdist[7].data
    t_matrix = fdist[3].data
    ti_col = models.Polynomial2D(1, name='TI_column_correction')
    ti_col.parameters = ti_matrix[0][::-1]
    ti_row = models.Polynomial2D(1, name='TI_row_correction')
    ti_row.parameters = ti_matrix[1][::-1]

    t_col = models.Polynomial2D(1, name='T_column_correction')
    t_col.parameters = t_matrix[0][::-1]
    t_row = models.Polynomial2D(1, name='T_row_correction')
    t_row.parameters = t_matrix[1][::-1]
    ti_col.inverse = t_col
    ti_row.inverse = t_row
    t_transform = ti_row & ti_col

    mapping = models.Mapping([0, 1, 0, 1])
    mapping.inverse = models.Identity(2)

    # ident is created here so that mapping can be assigned as inverse
    ident = models.Identity(2)
    ident.inverse = models.Mapping([0, 1, 0, 1])

    poly2t_mapping = models.Mapping([0, 1, 0, 1])
    poly2t_mapping.inverse = models.Mapping([0, 1, 0, 1])

    distortion_transform = m_transform | mapping | poly | poly2t_mapping | t_transform | ident | models.Mapping(
        [1, 0])

    fdist.close()
    f = AsdfFile()
    tree = {
        "title": "MIRI imager distortion - CDP4",
        "reftype": "DISTORTION",
        "instrument": "MIRI",
        "detector": "MIRIMAGE",
        "exp_type": "MIR_IMAGE",
        "pedigree": "GROUND",
        "author": "N. Dencheva",
        "model": distortion_transform
    }
    f.tree = tree
    f.write_to(outname)
    return f
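
A hypothetical usage, evaluating the stored transform at a detector pixel (the input file name is taken from the docstring example):

ref = create_miri_imager_distortion(
    "MIRI_FM_MIRIMAGE_DISTORTION_03.02.00.fits", "test.asdf")
x_out, y_out = ref.tree["model"](511.5, 511.5)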
Code example #59
0
def imaging_distortion(input_model, reference_files):
    distortion = AsdfFile.open(reference_files['distortion']).tree['model']
    return distortion
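
The reference file opened here is never closed; a sketch of a variant that manages the handle, assuming the extracted transform holds no lazily-loaded array data:

from asdf import AsdfFile

def imaging_distortion_closed(input_model, reference_files):
    f = AsdfFile.open(reference_files['distortion'])
    try:
        # Safe only if the model carries no lazily-loaded arrays.
        return f.tree['model']
    finally:
        f.close()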
Code example #60
0
    def __init__(self,
                 init=None,
                 schema=None,
                 memmap=False,
                 pass_invalid_values=None,
                 strict_validation=None,
                 validate_on_assignment=None,
                 ignore_missing_extensions=True,
                 **kwargs):
        """
        Parameters
        ----------
        init : str, tuple, `~astropy.io.fits.HDUList`, ndarray, dict, None

            - None : Create a default data model with no shape.

            - tuple : Shape of the data array.
              Initialize with an empty data array of the given shape.

            - file path: Initialize from the given file (FITS or ASDF)

            - readable file object: Initialize from the given file
              object

            - `~astropy.io.fits.HDUList` : Initialize from the given
              `~astropy.io.fits.HDUList`.

            - A numpy array: Used to initialize the data array

            - dict: The object model tree for the data model

        schema : dict, str (optional)
            Tree of objects representing a JSON schema, or string naming a schema.
            The schema to use to understand the elements on the model.
            If not provided, the schema associated with this class
            will be used.

        memmap : bool
            Turn memmap of FITS file on or off.  (default: False).  Ignored for
            ASDF files.

        pass_invalid_values : bool or None
            If `True`, values that do not validate the schema
            will be added to the metadata. If `False`, they will be set to `None`.
            If `None`, value will be taken from the environmental PASS_INVALID_VALUES.
            Otherwise the default value is `False`.

        strict_validation : bool or None
            If `True`, schema validation errors will generate
            an exception. If `False`, they will generate a warning.
            If `None`, value will be taken from the environmental STRICT_VALIDATION.
            Otherwise, the default value is `False`.

        validate_on_assignment : bool or None
            Defaults to `None`.
            If `None`, value will be taken from the environmental VALIDATE_ON_ASSIGNMENT,
            defaulting to `True` if no environment variable is set.
            If `True`, attribute assignments are validated at the time of assignment.
            Validation errors generate warnings and values will be set to `None`.
            If `False`, schema validation occurs only once at the time of write.
            Validation errors generate warnings.

        ignore_missing_extensions : bool
            When `False`, raise warnings when a file is read that
            contains metadata about extensions that are not available.
            Defaults to `True`.

        kwargs : dict
            Additional keyword arguments passed to lower level functions. These arguments
            are generally file format-specific. Arguments of note are:

            - FITS

              skip_fits_update - bool or None
                  `True` to skip updating the ASDF tree from the FITS headers, if possible.
                  If `None`, value will be taken from the environmental SKIP_FITS_UPDATE.
                  Otherwise, the default value is `True`.
        """

        # Override value of validation parameters if not explicitly set.
        if pass_invalid_values is None:
            pass_invalid_values = get_envar_as_boolean("PASS_INVALID_VALUES",
                                                       False)
        self._pass_invalid_values = pass_invalid_values
        if strict_validation is None:
            strict_validation = get_envar_as_boolean("STRICT_VALIDATION",
                                                     False)
        if validate_on_assignment is None:
            validate_on_assignment = get_envar_as_boolean(
                "VALIDATE_ON_ASSIGNMENT", True)
        self._strict_validation = strict_validation
        self._ignore_missing_extensions = ignore_missing_extensions
        self._validate_on_assignment = validate_on_assignment

        kwargs.update({'ignore_missing_extensions': ignore_missing_extensions})

        # Load the schema files
        if schema is None:
            if self.schema_url is None:
                schema = _DEFAULT_SCHEMA
            else:
                # Create an AsdfFile so we can use its resolver for loading schemas
                schema = asdf_schema.load_schema(self.schema_url,
                                                 resolve_references=True)

        self._schema = mschema.merge_property_trees(schema)

        # Provide the object as context to other classes and functions
        self._ctx = self

        # Initialize with an empty AsdfFile instance as this is needed for
        # reading in FITS files where validate._check_value() gets called, and
        # ctx needs to have an _asdf attribute.
        self._asdf = AsdfFile()

        # Determine what kind of input we have (init) and execute the
        # proper code to initialize the model
        self._files_to_close = []
        self._iscopy = False
        is_array = False
        is_shape = False
        shape = None

        if init is None:
            asdffile = self.open_asdf(init=None, **kwargs)

        elif isinstance(init, dict):
            asdffile = self.open_asdf(init=init, **kwargs)

        elif isinstance(init, np.ndarray):
            asdffile = self.open_asdf(init=None, **kwargs)

            shape = init.shape
            is_array = True

        elif isinstance(init, tuple):
            for item in init:
                if not isinstance(item, int):
                    raise ValueError("shape must be a tuple of ints")

            shape = init
            is_shape = True
            asdffile = self.open_asdf(init=None, **kwargs)

        elif isinstance(init, DataModel):
            asdffile = None
            self.clone(self, init)
            if not isinstance(init, self.__class__):
                self.validate()
            return

        elif isinstance(init, AsdfFile):
            asdffile = init

        elif isinstance(init, fits.HDUList):
            asdffile = fits_support.from_fits(init, self._schema, self._ctx,
                                              **kwargs)

        elif isinstance(init, (str, bytes, PurePath)):
            if isinstance(init, PurePath):
                init = str(init)
            if isinstance(init, bytes):
                init = init.decode(sys.getfilesystemencoding())
            file_type = filetype.check(init)

            if file_type == "fits":
                if s3_utils.is_s3_uri(init):
                    init_fitsopen = s3_utils.get_object(init)
                    memmap = None
                else:
                    init_fitsopen = init

                hdulist = fits.open(init_fitsopen, memmap=memmap)
                asdffile = fits_support.from_fits(hdulist, self._schema,
                                                  self._ctx, **kwargs)
                self._files_to_close.append(hdulist)

            elif file_type == "asdf":
                asdffile = self.open_asdf(init=init, **kwargs)

            else:
                # TODO handle json files as well
                raise IOError(
                    "File does not appear to be a FITS or ASDF file.")

        else:
            raise ValueError("Can't initialize datamodel using {0}".format(
                str(type(init))))

        # Initialize object fields as determined from the code above
        self._shape = shape
        self._instance = asdffile.tree
        self._asdf = asdffile

        # Initialize class dependent hidden fields
        self._no_asdf_extension = False

        # Instantiate the primary array of the image
        if is_array:
            primary_array_name = self.get_primary_array_name()
            if not primary_array_name:
                raise TypeError(
                    "Array passed to DataModel.__init__, but model has "
                    "no primary array in its schema")
            setattr(self, primary_array_name, init)

        # If a shape has been given, initialize the primary array.
        if is_shape:
            primary_array_name = self.get_primary_array_name()
            if not primary_array_name:
                raise TypeError(
                    "Shape passed to DataModel.__init__, but model has "
                    "no primary array in its schema")

            # Initialization occurs when the primary array is first
            # referenced. Do so now.
            getattr(self, primary_array_name)

        # initialize arrays from keyword arguments when they are present

        for attr, value in kwargs.items():
            if value is not None:
                subschema = properties._get_schema_for_property(
                    self._schema, attr)
                if 'datatype' in subschema:
                    setattr(self, attr, value)

        # Call hook that sets model properties
        self.on_init(init)
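
The on_init hook called above gives subclasses a place to finish setup; a minimal sketch, assuming the base implementation may simply be extended:

class MyModel(DataModel):
    def on_init(self, init):
        # Runs at the end of __init__ with the original init argument.
        super().on_init(init)
        if not self.meta.hasattr('telescope'):
            self.meta.telescope = 'JWST'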