Example #1
def table_to_hdulist(table):
    '''Create an HDUList from a set of headers and an astropy Table

    Parameters
    ----------
    table : astropy.table.Table
        The table to create the HDUList from. Note that the extra attribute,
        table.meta, if present, is expected to be a dictionary of
        keyword/value pairs to place into the primary HDU as FITS cards.

    Returns
    -------
    astropy.io.fits.HDUList
        The HDUList.
    '''

    # Create the Primary HDU
    fits_header = fits.Header()
    fits_header.update(table.meta)
    hdu_primary = fits.PrimaryHDU(header=fits_header)

    # Create the Table HDU
    hdu_table = fits.TableHDU(np.array(table))

    # Put it all together.
    return fits.HDUList([hdu_primary, hdu_table])
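
A minimal usage sketch (not from the source project), assuming the `fits`,
`np`, and `Table` imports the function relies on; the column names, meta
keyword, and output filename are illustrative.

import numpy as np
from astropy.io import fits
from astropy.table import Table

table = Table({'X': [1.0, 2.0], 'Y': [3.5, 4.5]})
table.meta = {'OBSERVER': 'example'}  # becomes a card in the primary HDU
hdulist = table_to_hdulist(table)
hdulist.writeto('example.fits', overwrite=True)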
Example #2
    def integrationParameters(self):
        hdu = fits.TableHDU(name='parameters')
        hdu.header['lightspd'] = self.config.lightspeed()
        hdu.header['omegaM'], hdu.header['omegaK'], hdu.header['omegaL'] = \
            self.omegasMKL()
        hdu.header['H0'] = self.H0()
        return hdu
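
A hedged round-trip sketch (standalone; the config accessors above are
project-specific, so the values below are placeholders): a header-only
TableHDU can carry scalar parameters and be read back by extension name.

from astropy.io import fits

hdu = fits.TableHDU(name='parameters')
hdu.header['lightspd'] = 299792.458  # placeholder value
hdu.header['H0'] = 70.0              # placeholder value
fits.HDUList([fits.PrimaryHDU(), hdu]).writeto('params.fits', overwrite=True)
print(fits.getheader('params.fits', 'parameters')['H0'])  # -> 70.0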
Example #3
def test_read_returns_image(tmpdir):
    # Test that CCDData.read returns an image when reading a FITS file
    # containing a table and an image, in that order.
    tbl = Table(np.ones(10).reshape(5, 2))
    img = np.ones((5, 5))
    hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()),
                              fits.ImageHDU(img)])
    filename = tmpdir.join('table_image.fits').strpath
    hdul.writeto(filename)
    ccd = CCDData.read(filename, unit='adu')
    # Expecting to get (5, 5), the size of the image
    assert ccd.data.shape == (5, 5)
Example #4
    def _write_fits(self, filename, implementation=DATAIMPL):
        """Write an FITS file representation of the configuration state.
        """
        if implementation != self.DATAIMPL and implementation != self.HEADERIMPL:
            print("Implementation :",
                  implementation,
                  'not allowed',
                  file=sys.stderr)
            return
        # hduList will contain one TableHDU per section
        hduList = fits.HDUList()

        # get all Configuration entries
        # loop over section
        for section in self._entries.keys():
            if implementation == self.DATAIMPL:
                # prepare 3 arrays
                key_array = []
                value_array = []
                comment_array = []
                # loop over section entries and fill arrays
                for key, value_comment in self._entries[section].items():
                    key_array.append(key)
                    value_array.append(value_comment[self.VALUE_INDEX])
                    comment_array.append(value_comment[self.COMMENT_INDEX])
                # create fits.Column objects from the filled arrays
                ckey = fits.Column(name='key',
                                   format='A256',
                                   array=np.array(key_array))
                cvalue = fits.Column(name='value',
                                     format='A256',
                                     array=np.array(value_array))
                ccomment = fits.Column(name='comments',
                                       format='A256',
                                       array=np.array(comment_array))
                # Create the table
                hdu = fits.TableHDU.from_columns([ckey, cvalue, ccomment])
                hdu.name = section
                # append table to hduList
                hduList.append(hdu)

            elif (implementation == self.HEADERIMPL):
                header = fits.Header()
                for key, value_comments in self._entries[section].items():
                    header[key] = (value_comments[self.VALUE_INDEX],
                                   value_comments[self.COMMENT_INDEX])

                table_0 = fits.TableHDU(data=None, header=header, name=section)
                hduList.append(table_0)

        # `clobber` was renamed `overwrite` in modern astropy
        hduList.writeto(filename, overwrite=True)
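
A hedged read-back sketch for files written with the DATAIMPL layout above;
the filename and section name are placeholders.

from astropy.io import fits

with fits.open('config.fits') as hdul:
    section = hdul['MYSECT'].data  # one TableHDU per section, named after it
    for row in section:
        print(row['key'], '=', row['value'], '#', row['comments'])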
Example #5
def traitlets_config_to_fits(config, fits_filename, clobber=True):
    '''Write a FITS file that represents configuration.

    Parameters
    ----------
    config : traitlets.config.loader.Config
        a traitlets.config.loader.Config to write in FITS format
    fits_filename : str
        FITS file name to write
    clobber : bool
        When True, overwrite the output file if it exists.

    Raises
    ------
    OSError : if the FITS file containing the traitlets config cannot be written
    '''

    if not isinstance(config, Config):
        raise TypeError(
            'Config must be an instance of traitlets.config.loader.Config')

    # hduList will contain one TableHDU per section
    hduList = fits.HDUList()
    # get all Configuration entries
    # loop over section
    for section, entry in config.items():
        header = fits.Header()
        for key, value in entry.items():

            # CONTINUE and HIERARCH are incompatible, so we have to decide
            # to either truncate key or value. I went for the key. @maxnoe
            if isinstance(value, str):
                if len(key) > 8 and (len(key) + len(value)) > 70:
                    warnings.warn('Key "{}" will be truncated to {}'.format(
                        key, key[:8]))
                    key = key[:8]

            header[key] = value

        table_0 = fits.TableHDU(data=None, header=header, name=section)
        hduList.append(table_0)
    try:
        hduList.writeto(fits_filename, overwrite=clobber)  # honor the clobber argument
    except OSError:
        logging.exception('Could not save {}'.format(fits_filename))
        raise
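
The truncation above is needed because a FITS header card is 80 characters:
keywords longer than 8 characters require a HIERARCH card, long string values
require CONTINUE cards, and the two cannot be combined. A standalone
illustration (not from the source):

from astropy.io import fits

header = fits.Header()
header['SHORTKEY'] = 'x' * 100      # long value alone is fine: CONTINUE cards
header['A_LONG_KEYWORD'] = 'short'  # long key alone works: HIERARCH card (with a warning)
# header['A_LONG_KEYWORD'] = 'x' * 100  # long key AND long value would raise an error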
Example #6
    def test_data_in_other_hdu(self, tmpdir):
        tbl = Table(np.ones(10).reshape(5, 2))
        data = 100 * np.ones(self.shape)
        hdul = fits.HDUList(hdus=[
            fits.PrimaryHDU(),
            fits.TableHDU(tbl.as_array()),
            fits.ImageHDU(data)
        ])
        fname = tmpdir.join('test_table.fits').strpath
        hdul.writeto(fname)

        logs = []
        lh = log_to_list(logger, logs, full_record=True)
        f = _extract_fits(fname)
        assert_equal(f['data'], 100 * np.ones(self.shape))
        assert_equal(f['unit'], None)

        # ensure log emitting
        logs = [i for i in logs if i.message == 'First hdu with image data: 2']
        assert_equal(len(logs), 1)
        assert_equal(logs[0].levelname, 'INFO')

        logger.removeHandler(lh)
Example #7
def generate_fits_files(tmpdir, psflib):
    np.random.seed(42)

    # Generate fake time axis
    exptime = 60  # sec
    times = np.arange(0, 10 / 60 / 24, 1 / 60 / 24) + 59283
    bjd = 2400000.5 + times

    # Generate fake DRP fluxes
    fluxes = np.ones_like(times)

    # Generate fake subarray images:
    n_pixels = 200
    with open(psflib, 'rb') as f:  # close the file after loading
        psf_lib = load(f)          # `load` is presumably pickle.load
    x = np.linspace(-100, 99, 200)
    xx, yy = np.mgrid[0:200, 0:200]
    mask = (xx - 100) ** 2 + (yy - 100) ** 2 < 70 ** 2
    sa = mask * psf_lib[0](x, x)
    image_cube = np.repeat(1e6 * sa[None, :, :], len(times), axis=0)
    image_cube += 1e-5 * np.random.randn(*image_cube.shape) + np.ones(
        image_cube.shape)
    unit_vector = np.ones_like(times)

    data = {
        'SCI_RAW_Attitude': {
            'header':
                {
                    'TEXPTIME': exptime,
                    'EXPTIME': exptime,
                    'NAXIS2': len(times),
                },
            'data':
                {
                    'SC_ROLL_ANGLE': np.degrees(
                        np.cos(2 * np.pi * times / 100 / 60 / 24)),
                    'SC_DEC': unit_vector,
                    'SC_RA': unit_vector,
                    'MJD_TIME': times,
                    'BJD_TIME': bjd,
                },
            'ext': 1
        },
        'SCI_RAW_HkExtended': {
            'header':
                {
                    'TEXPTIME': exptime,
                    'EXPTIME': exptime,
                },
            'data':
                {
                    'VOLT_FEE_VOD': unit_vector,
                    'VOLT_FEE_VRD': unit_vector,
                    'VOLT_FEE_VOG': unit_vector,
                    'VOLT_FEE_VSS': unit_vector,
                    'VOLT_FEE_CCD': unit_vector,
                    'thermFront_2': unit_vector
                },
            'ext': 9
        },
        'REF_APP_GainCorrection': {
            'header':
                {
                    'TEXPTIME': exptime,
                    'EXPTIME': exptime,
                    'VOD_OFF': 22,
                    'VRD_OFF': 9,
                    'VOG_OFF': -5.75,
                    'VSS_OFF': 8.8,
                    'TEMP_OFF': -40,
                    'GAIN_NOM': 0.52,
                },
            'data':
                {
                    'FACTOR': unit_vector - 1,
                    'EXP_VOD': unit_vector,
                    'EXP_VRD': unit_vector,
                    'EXP_VOG': unit_vector,
                    'EXP_VSS': unit_vector,
                    'EXP_TEMP': unit_vector
                },
            'ext': 1
        },
        'SCI_CAL_SubArray': {
            'data':
                {
                    'MJD_TIME': times,
                    'BJD_TIME': bjd,
                    'RON': 3.4 * unit_vector,
                    'BIAS': 0 * unit_vector
                },
            'header': {
                'X_WINOFF': 0,
                'Y_WINOFF': 0,
                'V_STRT_M': 0,
                'NEXP': len(times),
                'TEXPTIME': exptime,
                'EXPTIME': exptime,
            },
            'ext': 2
        },
        'SCI_COR_SubArray': {
            'data': image_cube,
            'header': {
                'X_WINOFF': 0,
                'Y_WINOFF': 0,
                'V_STRT_M': 0,
                'NEXP': len(times),
                'TEXPTIME': exptime,
                'EXPTIME': exptime,
            },
            'ext': 1
        },
        'RAW_SubArray': {
            'data':
                {
                    'MJD_TIME': times,
                    'BJD_TIME': bjd
                },
            'header': {
                'NEXP': len(times),
                'TEXPTIME': exptime,
                'EXPTIME': exptime,
            },
            'ext': 2
        },
        'PIP_COR_PixelFlagMapSubArray': {
            'data': image_cube,
            'ext': 1
        },
        'SCI_COR_Lightcurve-DEFAULT': {
            'data': fluxes
        },
        'SCI_COR_Lightcurve-OPTIMAL': {
            'data': fluxes
        },
        'SCI_COR_Lightcurve-RINF': {
            'data': fluxes
        },
        'SCI_COR_Lightcurve-RSUP': {
            'data': fluxes
        },
        'EXT_PRE_StarCatalogue': {
            'data': {
                'T_EFF': [5800],
                'distance': [10],
                'MAG_CHEOPS': [10],
                'RA': [1.0],
                'DEC': [1.0]
            },
        },
        "SCI_RAW_Imagette": {
            'data': image_cube
        },
    }

    default_header = fits.Header({
        'X_WINOFF': n_pixels // 2,
        'Y_WINOFF': n_pixels // 2,
        'V_STRT_M': 0,
        'NEXP': len(times),
        'TEXPTIME': exptime,
        'EXPTIME': exptime,
        'RO_FREQU': 1,
    })

    sci_raw_table = {
        "X_OFF_FULL_ARRAY": 0 * unit_vector,
        "Y_OFF_FULL_ARRAY": 0 * unit_vector,
        "X_OFF_SUB_ARRAY": 0 * unit_vector,
        "Y_OFF_SUB_ARRAY": 0 * unit_vector,
        "MJD_TIME": times,
        "BJD_TIME": bjd
    }

    for filename in data:
        path = os.path.join(tmpdir, filename + '.fits')
        ext = data[filename].get('ext', 1)
        hdu = fits.HDUList(
            [fits.PrimaryHDU(image_cube, header=default_header)] + (ext) * [
                fits.ImageHDU(image_cube)])
        if ext == 1:
            if filename == 'SCI_RAW_Imagette':
                hdu.append(
                    fits.TableHDU(Table(sci_raw_table).to_pandas().to_records()))
            elif filename == 'PIP_COR_PixelFlagMapSubArray':
                hdu.append(fits.ImageHDU(image_cube))
            else:
                hdu.append(fits.TableHDU(Table(
                    {'MJD_TIME': times, 'BJD_TIME': bjd}).to_pandas().to_records()))
        elif ext > 2:
            hdu[1] = fits.TableHDU(
                Table(data['SCI_RAW_HkExtended']['data']).to_pandas().to_records())
            hdu[2] = fits.TableHDU(Table(
                {'MJD_TIME': times, 'BJD_TIME': bjd}).to_pandas().to_records())

        if isinstance(data[filename]['data'], dict):
            table_rec = Table(data[filename]['data']).to_pandas().to_records()
            header = fits.Header(data[filename].get('header', ""))
            hdu[ext] = fits.TableHDU(table_rec, header=header)
        elif isinstance(data[filename]['data'], np.ndarray):
            header = fits.Header(data[filename].get('header', ""))
            hdu[ext] = fits.ImageHDU(data[filename]['data'], header=header)

        if filename == 'RAW_SubArray':
            hdu.extend((9 - len(hdu) + 1) * [
                fits.TableHDU(Table(sci_raw_table).to_pandas().to_records())])
            table_rec = Table(
                data['SCI_RAW_HkExtended']['data']).to_pandas().to_records()
            hdu[9] = fits.TableHDU(table_rec)
        hdu.writeto(path, overwrite=True)

    teffs = [2450., 2500., 2650., 2800., 3070., 3200., 3310., 3370.,
             3420., 3470., 3520., 3580., 3650., 3720., 3790., 3850.,
             4060., 4205., 4350., 4590., 4730., 4900., 5080., 5250.,
             5410., 5520., 5630., 5700., 5770., 5800., 5830., 5860.,
             5945., 6030., 6115., 6200., 6280., 6360., 6440., 6590.,
             6740., 6890., 7050., 7200., 7440., 7500., 7800., 8000.,
             8080., 8270., 8550., 8840., 9200., 9700., 10700., 12500.,
             14000., 14500., 15700., 16700., 17000., 20600., 26000., 31500.,
             32500., 34500., 36500.]

    flats = np.ones((len(teffs), 1024, 1024))

    flat_rec = Table({'T_EFF': teffs,
                      'DATA_TYPE': len(teffs) * ['FLAT FIELD']}
    ).to_pandas(index=False).to_records(
        index_dtypes="<i8",
        column_dtypes={'DATA_TYPE': "S16"}
    )

    flat_path = os.path.join(tmpdir, 'flats.fits')
    hdu = fits.HDUList([fits.PrimaryHDU(np.ones((1024, 1024))),
                        fits.ImageHDU(flats),
                        fits.TableHDU.from_columns(flat_rec)])

    hdu.writeto(flat_path)
Example #8
    def to_hdu(self,
               hdu_mask='MASK',
               hdu_uncertainty='UNCERT',
               key_uncertainty_type='UTYPE'):
        """Create a HDUList from a ImageHDU object
        
        Parameters
        ----------
        hdu_mask, hdu_uncertainty : str, optional
            If it is a string, append this attribute to the HDUList as an
            `astropy.io.fits.ImageHDU` with the string as the extension name.
            Defaults are ``'MASK'`` for hdu_mask and ``'UNCERT'`` for
            hdu_uncertainty.

        key_uncertainty_type : str, optional
            The header key name for the class name of the uncertainty (if any)
            that is used to store the uncertainty type in the uncertainty hdu.
        
        Raises
        ------
        ValueError
            - If `self.__mask` is set but is not a `numpy.ndarray`.
            - If `self.__uncertainty` is set but is not an astropy uncertainty type.
            - If `self.__uncertainty` is set but has a different unit than `self.__data`.
        
        Returns
        -------
        hdulist:
            `astropy.io.fits.HDUList`
        """

        # ----------------------------------HDU0------------------------------
        # Copy the metadata so the object's own header is not modified
        if self.__meta is not None:
            header = fits.Header(self.__meta.copy())

        else:
            header = fits.Header()
        # Cross-check that the metadata is FITS safe; correct it where needed
        for k, v in header.items():
            _insert_in_metadata_fits_safe(header, k, v)

        if self.__wcs:
            # Create a header for a given wcs object
            # Hard-Coded relax parameter to write all
            # recognized informal extensions of the WCS standard.
            wcs_header = self.__wcs.to_header(relax=True)
            header.extend(wcs_header, useblanks=False, update=True)

        # Create a FITS header for storing missing_axis
        # MISSING0 : 1 if axis 1 is missing
        # other keywords are skipped, if the axis is present
        if self.__missing_axis:
            header0 = fits.Header()
            for index, axis in enumerate(self.__missing_axis):
                if axis:
                    header0[MISSING.format(index)] = 1
                else:
                    continue

            for k, v in header0.items():
                _insert_in_metadata_fits_safe(header0, k, v)

            header.extend(header0, useblanks=False, update=True)

        # PrimaryHDU list contains only meta, missing_axis and wcs as headers, no data
        hdus = [fits.PrimaryHDU(data=None, header=header)]

        #------------------------------HDU0----------------------------------

        #------------------------------HDU1----------------------------------
        # Header for unit
        if self.__unit:
            header_unit = fits.Header()
            if self.__unit is not u.dimensionless_unscaled:
                header_unit['bunit'] = self.__unit.to_string()
            hdus.append(fits.ImageHDU(self.__data, header_unit, name='UNIT'))

        #------------------------------HDU1----------------------------------

        #------------------------------HDU2----------------------------------
        # Store the uncertainty
        if self.__uncertainty is not None:
            # NOTE: Comments copied from `astropy.nddata.ccdata`, displayed here for reference
            # We need to save some kind of information which uncertainty was
            # used so that loading the HDUList can infer the uncertainty type.
            # No idea how this can be done so only allow StdDevUncertainty.
            uncertainty_cls = self.__uncertainty.__class__
            if uncertainty_cls not in _known_uncertainties:
                raise ValueError(
                    'only uncertainties of type {} can be saved.'.format(
                        _known_uncertainties))
            uncertainty_name = _unc_cls_to_name[uncertainty_cls]

            hdr_uncertainty = fits.Header()
            hdr_uncertainty[key_uncertainty_type] = uncertainty_name

            # Assuming uncertainty is an StdDevUncertainty save just the array
            # this might be problematic if the Uncertainty has a unit differing
            # from the data so abort for different units. This is important for
            # astropy > 1.2
            if (hasattr(self.__uncertainty, 'unit')
                    and self.__uncertainty.unit is not None):
                if not _uncertainty_unit_equivalent_to_parent(
                        uncertainty_cls, self.__uncertainty.unit, self.__unit):
                    raise ValueError(
                        'saving uncertainties with a unit that is not '
                        'equivalent to the unit from the data unit is not '
                        'supported.')
            # Save just the underlying array, honoring the hdu_uncertainty name
            hduUncert = fits.ImageHDU(self.__uncertainty.array,
                                      hdr_uncertainty,
                                      name=hdu_uncertainty)
            hdus.append(hduUncert)
        #----------------------------HDU2--------------------------------

        #----------------------------HDU3------------------------------
        # Store the mask
        if self.__mask is not None:
            # Always assuming that the mask is a np.ndarray
            # by checking that it has a shape
            if not hasattr(self.__mask, 'shape'):
                raise ValueError('only a numpy.ndarray mask can be saved.')
            hduMask = fits.ImageHDU(self.__mask.astype(np.uint8),
                                    name=hdu_mask)
            hdus.append(hduMask)

        #----------------------------HDU3-------------------------------
        # Store the extra_coords
        if self.__extra_coords is not None:

            # Set up the data
            flattened_list_of_dict = flatten(self.__extra_coords)

            # We convert the list of dictionary to pandas dataframe
            # and then to numpy.ndarray
            dframe = pd.DataFrame(flattened_list_of_dict)
            extra_coords = dframe.values

            # Set up the header
            header3 = fits.Header()
            for index, value in enumerate(extra_coords):
                header3[EXTRA_COORDS.format(index)] = value

            # Make sure all the keywords are FITS safe
            for k, v in header3.items():
                _insert_in_metadata_fits_safe(header3, k, v)

            # Setting up the Header
            hdu_extra_coords = fits.Header()
            hdu_extra_coords.extend(header3, useblanks=False, update=True)

            hdus.append(
                fits.TableHDU(data=extra_coords,
                              header=hdu_extra_coords,
                              name='EXTRA_COORDS'))
        #--------------------------HDU4---------------------------------

        hdulist = fits.HDUList(hdus)
        return hdulist
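
A hedged usage sketch: assuming `cube` is an instance of the (unnamed) class
that defines to_hdu above, the result is an ordinary HDUList.

hdulist = cube.to_hdu(hdu_mask='MASK', hdu_uncertainty='UNCERT')
hdulist.writeto('cube.fits', overwrite=True)  # placeholder filename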
Example #9
File: fit4.py Project: AndyHwh/FITS
from astropy.io import fits
from PIL import Image
import numpy as np

ims = Image.open("/users/wangfeng/desktop/天算法/me.jpg").convert("L")
hdu = fits.PrimaryHDU(np.asarray(ims))  # PrimaryHDU expects an array, not a PIL Image
hdu3 = fits.TableHDU()
print(type(hdu), '\n', hdu)
# hdulist = fits.HDUList([hdu])
# hdulist.writeto('new1.fits')
# hdu.writeto('ne6.fits')
hdu1 = fits.open('new1.fits', mode='update')
hdu1.info()
head = hdu1[0].header
head.set('TITLE', 'Myself', 'Handsome as I am')
head.set('MODEL', 'Che1-CL10', 'Honor 4X')
head.set('DATE', '2017-4-29', 'Photo: Creation Date')
head.set('EXPOSURE', '1/120', 'Unit: s')
head.set('APERTURE', '2.4', 'Aperture Value')
head.set('ISO', '100', 'None')
head.set('FL', '22', 'FL: Focal length  Unit: mm')
head.set('WB', 'Auto', 'WB: White balance')
head.set('FLASH', 'No flash', 'None')

data1 = np.array(['Exif Version', 'FlashPix Version'])
data2 = np.array(['1.00', '2.4'])
col1 = fits.Column(name='property', format='20A', array=data1)
col2 = fits.Column(name='parament', format='20A', array=data2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.TableHDU.from_columns(cols)
hdu1.append(tbhdu)  # append the EXIF table to the opened file
hdu1.flush()        # write changes back (file was opened in update mode)
hdu1.close()
Example #10
def json_to_fits(json_filename, fits_filename, clobber=True):
    '''Write a FITS file that represents json file for traitlets configuration.

    Parameters
    ----------
    json_filename : str
        json file name containing traitlets Configuration.
        Only one level of section is allowed.
    fits_filename : str
        FITS file name to write
    clobber : bool
        When True, overwrite the output file if it exists.

    Raises
    ------
    OSError : if the FITS file containing a copy of the json content cannot be written
    FileNotFoundError : if json_filename cannot be opened
    '''
    try:
        f = open(json_filename, 'r')
        # hduList will contain one TableHDU per section
        hduList = fits.HDUList()
        json_object = load(f)  # json.load
        f.close()
        # Create a global header for key/value that not depend of a section
        global_header = fits.Header()

        # loop over json entries (corresponding to Python class or general purpose)
        for section, entry in json_object.items():
            header = fits.Header()

            if isinstance(entry, dict):
                for key, value in entry.items():
                    if isinstance(value, dict):
                        raise ValueError(
                            'Malformed json file: fits header cannot contain subobjects'
                        )

                    # CONTINUE and HIERARCH are incompatible, so we have to
                    # decide to either truncate key or value.
                    # I went for the key. @maxnoe
                    if isinstance(value, str):
                        if len(key) > 8 and (len(key) + len(value)) > 70:
                            warnings.warn(
                                'Key "{}" will be truncated to {}'.format(
                                    key, key[:8]))
                            key = key[:8]

                    header[key] = value

                # create a new TableHDU with current header and append it to hduList
                table_0 = fits.TableHDU(data=None, header=header, name=section)
                hduList.append(table_0)

            else:
                # entry is not a dictionary of key value but a simple value.
                # That means last section is not a section but a general purpose entry
                global_header[section] = entry

        # create a new TableHDU for global_header and append it to hduList
        table_0 = fits.TableHDU(data=None, header=global_header, name='GLOBAL')
        hduList.append(table_0)
        # write hduList to FITS file
        try:
            hduList.writeto(fits_filename, overwrite=clobber)  # honor the clobber argument
        except OSError:
            logging.exception('Could not save {}'.format(fits_filename))
            raise

    except FileNotFoundError:
        logging.exception('Could not open {}'.format(json_filename))
        raise
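
A hedged usage sketch with a minimal one-section JSON file; the section and
key names are illustrative.

import json

with open('config.json', 'w') as f:
    json.dump({'MySection': {'NBINS': 100}, 'VERSION': 2}, f)

json_to_fits('config.json', 'config.fits')
# -> a TableHDU named 'MySection' plus a 'GLOBAL' TableHDU carrying VERSION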
Example #11
def make_effects_hdu(effects):
    # .. todo:: decide what goes into the effects table of meta data
    return fits.TableHDU()
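
One hedged possibility for resolving the todo (the column name and format are
assumptions, not the project's decision):

from astropy.io import fits
import numpy as np

def make_effects_hdu(effects):
    # one row per effect, recording each effect's class name
    names = np.array([type(eff).__name__ for eff in effects])
    col = fits.Column(name='name', format='A32', array=names)
    return fits.TableHDU.from_columns([col])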
Example #12
def write_brick_file(band, brickname, Flux, InvVar, Wavelength, Resolution,
                     truth):
    """
    Write brick file

    HDU0 	FLUX 	IMAGE 	Spectral flux [nspec, nwave]
    HDU1 	IVAR 	IMAGE 	Inverse variance of flux
    HDU2 	WAVELENGTH 	IMAGE 	1D common wavelength grid in Angstroms
    HDU3 	RESOLUTION 	IMAGE 	3D sparse resolution matrix data [nspec,ndiag,nwave]
    HDU4 	FIBERMAP 	BINTABLE 	Fibermap entries

    HDU0
    NAXIS1 	3494 	int 	Number of wavelength bins
    NAXIS2 	51 	int 	Number of spectra
    EXTNAME 	FLUX 	str 	erg/s/cm^2/Angstrom

    HDU1
    NAXIS1 	3494 	int 	Number of wavelength bins
    NAXIS2 	51 	int 	Number of spectra
    EXTNAME 	IVAR 	str 	1 / (erg/s/cm^2/A)^2

    HDU2
    NAXIS1 	3494 	int 	Number of wavelength bins
    EXTNAME 	WAVELENGTH 	str 	Angstrom

    HDU3
    NAXIS1 	3494 	int 	Number of wavelength bins
    NAXIS2 	21 	int 	Number of diagonals
    NAXIS3 	51 	int 	Number of spectra
    EXTNAME 	RESOLUTION 	str 	no dimension

    HDU4
    NAXIS1 	224 	int 	length of dimension 1
    NAXIS2 	51 	int 	Number of spectra
    """

    NSpectra, NWavelength = Flux.shape

    outfile = 'brick-%s-%s.fits' % (band, brickname)

    head0 = fits.Header()
    head0.append(card=('NAXIS1', NWavelength, 'Number of wavelength bins'))
    head0.append(card=('NAXIS2', NSpectra, 'Number of spectra'))
    head0.append(card=('EXTNAME', 'FLUX', 'erg/s/cm^2/Angstrom'))

    hdu0 = fits.PrimaryHDU(data=Flux, header=head0)

    hdu1 = fits.ImageHDU(data=InvVar, name='IVAR')

    hdu2 = fits.ImageHDU(data=Wavelength, name='WAVELENGTH')

    hdu3 = fits.ImageHDU(data=Resolution, name='RESOLUTION')

    # Create HDU4 (FIBERMAP) from the truth columns

    col1 = fits.Column(name='TRUEZ', format='D', array=truth['TRUEZ'])
    col2 = fits.Column(name='GBANDT', format='D', array=truth['GBANDT'])
    col3 = fits.Column(name='RBANDT', format='D', array=truth['RBANDT'])
    col4 = fits.Column(name='ZBANDT', format='D', array=truth['ZBANDT'])
    col5 = fits.Column(name='W1BANDT', format='D', array=truth['W1BANDT'])
    col6 = fits.Column(name='W2BANDT', format='D', array=truth['W2BANDT'])
    col7 = fits.Column(name='TMPID', format='I', array=truth['TMPID'])

    hdu4 = fits.BinTableHDU.from_columns(
        [col1, col2, col3, col4, col5, col6, col7], name='FIBERMAP')

    hdulist = fits.HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])
    hdulist.writeto(outfile, overwrite=True)  # `clobber` was renamed `overwrite`
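
A hedged check of the layout documented in the docstring; the band and
brickname values are placeholders.

from astropy.io import fits

with fits.open('brick-r-0000.fits') as hdul:
    hdul.info()  # FLUX, IVAR, WAVELENGTH, RESOLUTION, FIBERMAP
    print(hdul['FIBERMAP'].columns.names)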
Example #13
def mkgrid(apred='r10',
           telescope='apo25m',
           lsfid=5440020,
           waveid=2420038,
           clobber=True):

    # create parameters of sample and read in
    if clobber: sample.sample(gridclass='rv')
    stars = ascii.read('test_rv')

    # synthesize spectra and read in. Will fail on hot stars, but add those below
    if clobber: synth.mksynth('test_rv')
    grid = fits.open('test_rv.fits')[1].data
    pars = fits.open('test_rv.fits')[0].data

    # load output parameters of successful syntheses
    n = len(stars)
    outpar = np.zeros(n,
                      dtype=[('teff', 'f4'), ('logg', 'f4'), ('mh', 'f4'),
                             ('am', 'f4'), ('cm', 'f4')])
    for i in range(pars.shape[0]):
        outpar['teff'][i] = pars[i, 0]
        outpar['logg'][i] = pars[i, 1]
        outpar['mh'][i] = pars[i, 2]
        outpar['am'][i] = pars[i, 3]
        outpar['cm'][i] = pars[i, 4]
    nout = pars.shape[0]

    # Add in hot stars
    a = fits.open(
        os.environ['APOGEE_SPECLIB'] +
        '/synth/synspec/kurucz/solarisotopes/sBA/ap00cp00np00vp20.fits')
    wav = spectra.fits2vector(a[0].header, 1)
    hot = np.where(stars['Teff'] > 8000)[0]
    init = 0
    for i in hot:
        te = stars['Teff'][i]
        logg = stars['logg'][i]
        mh = stars['[M/H]'][i]
        ite = int(round((te - a[0].header['CRVAL2']) / a[0].header['CDELT2']))
        ilogg = int(
            round((logg - a[0].header['CRVAL3']) / a[0].header['CDELT3']))
        imh = int(round((mh - a[0].header['CRVAL4']) / a[0].header['CDELT4']))
        print(te, logg, mh, ite, ilogg, imh)
        # use `and`, not `&`: bitwise `&` binds tighter than `>=`
        if ite >= 0 and ilogg >= 0 and imh >= 0:
            spec = a[0].data[imh, ilogg, ite, :].reshape(1, 38001)
            # need to try to remove continuum from hot grid
            baseline = np.polynomial.Polynomial.fit(np.log(wav), spec[0, :], 1)
            gd = np.where(spec[0, :] / baseline(np.log(wav)) > 0.99)[0]
            baseline = np.polynomial.Polynomial.fit(np.log(wav[gd]),
                                                    spec[0, gd], 1)

            grid = np.append(grid, spec / baseline(np.log(wav)), axis=0)
            outpar['teff'][nout] = te
            outpar['logg'][nout] = logg
            outpar['mh'][nout] = mh
            outpar['am'][nout] = 0.
            outpar['cm'][nout] = 0.
            nout += 1
    # Trim to successful syntheses
    outpar = outpar[0:nout]

    # output FITS file
    hdu = fits.HDUList()
    h = fits.PrimaryHDU()
    h.header['COMMENT'] = 'Synthetic RV grids'
    h.header['COMMENT'] = 'HDU #1: parameter table'
    #h.header['COMMENT'] = 'HDU #2: high resolution, unsmoothed'
    h.header['COMMENT'] = 'HDU #2: LSF smoothed, apStar resolution (log, 6.e-6 dispersion)'
    h.header['COMMENT'] = 'HDU #3: LSF smoothed, apVisit type resolution (log, 4.75e-6 dispersion)'
    hdu.append(h)

    # parameter table
    hdu.append(fits.TableHDU(outpar))

    # raw spectra (currently commented out)
    h = fits.ImageHDU(grid)
    h.header['CRVAL1'] = 15100.
    h.header['CDELT1'] = 0.05
    h.header['CTYPE1'] = 'WAVELENGTH'
    h.header['LSFID'] = lsfid
    h.header['WAVEID'] = waveid
    h.header['APRED'] = apred
    #hdu.append(h)

    # put on logarithmic wavelength scale and smooth to R=30000
    nspec = grid.shape[0]
    resol = 30000.
    ws = spectra.airtovac(np.linspace(15100., 17000., 38001))
    wout = 10.**(np.arange(aspcap.logw0, np.log10(17000.), aspcap.dlogw / 9.))
    smooth = np.zeros([nspec, len(wout)])
    sig = 1. / (aspcap.dlogw / 9. * np.log(10)) / resol / 2.354
    for i in range(nspec):
        # Interpolate the input spectrum
        ip = interpolate.InterpolatedUnivariateSpline(ws, grid[i, :], k=3)
        smooth[i, :] = ndimage.gaussian_filter1d(ip(wout), sig, mode='nearest')

    # now resample to apStar and (approx) apVisit wavelength scales
    wapstar = 10.**(aspcap.logw0 + np.arange(aspcap.nw_apStar) * aspcap.dlogw)
    apstardata = np.zeros([nspec, len(wapstar)])
    wapvisit = 10.**(np.arange(aspcap.logw0, np.log10(17000.), 4.75e-6))
    apvisitdata = np.zeros([nspec, len(wapvisit)])
    for i in range(nspec):
        # Interpolate the input spectrum
        ip = interpolate.InterpolatedUnivariateSpline(wout, smooth[i, :], k=3)
        apstardata[i, :] = ip(wapstar)
        apvisitdata[i, :] = ip(wapvisit)

    # convolve and sample to apStar grid in vacuum
    #ws=spectra.airtovac(np.linspace(15100.,17000.,38001))
    #x,ls= synth.getlsf(lsfid,waveid,apred=apred,prefix='lsf_',fill=True)
    #apstardata=lsf.convolve(ws,grid,lsf=ls,xlsf=x)
    h = fits.ImageHDU(apstardata)
    h.header['CRVAL1'] = aspcap.logw0
    h.header['CDELT1'] = aspcap.dlogw
    h.header['CTYPE1'] = 'LOG(WAVELENGTH)'
    #h.header['LSFID'] = lsfid
    #h.header['WAVEID'] = waveid
    #h.header['APRED'] = apred
    h.header['RESOL'] = resol
    hdu.append(h)

    # convolve and sample to higher resolution for apVisit spectra
    #wout=10.**(np.arange(4.179,np.log10(17000.),4.75e-6))
    #smoothdata=lsf.convolve(ws,grid,lsf=ls,xlsf=x,highout=True)
    #nwav=smoothdata.shape[1]
    #nspec=smoothdata.shape[0]
    #apvisitdata=np.zeros([nspec,len(wout)])
    #wav=10.**(aspcap.logw0+np.arange(nwav)*aspcap.dlogw/9.)
    #for i in range(nspec) :
    #    # Interpolate the input spectrum
    #    bd=np.where(np.isnan(smoothdata[i,:]))[0]
    #    smoothdata[i,bd]=1.
    #    ip= interpolate.InterpolatedUnivariateSpline(wav, smoothdata[i,:], k=3)
    #    apvisitdata[i,:] = ip(wout)
    h = fits.ImageHDU(apvisitdata)
    h.header['CRVAL1'] = 4.179
    h.header['CDELT1'] = 4.75e-6
    h.header['CTYPE1'] = 'LOG(WAVELENGTH)'
    h.header['RESOL'] = resol
    hdu.append(h)

    # output
    hdu.writeto('apg_synthgrid.fits', overwrite=True)