Example #1
import numpy as np
from astropy.io import fits
from astropy.table import Table


def test_table_fits_io_astropy(table):
    """Test `astropy.table.Table` FITS I/O in Astropy.

    These tests live in Gammapy to check / ensure that the features
    we rely on work properly for all Astropy versions we support in CI
    (currently Astropy 1.3 and up).

    This is useful, because Table FITS I/O was pretty shaky for a while
    and incrementally improved over time.

    These are the same examples that we have in the docstring
    at the top of `gammapy/utils/fits.py`.
    """
    # Check Table -> BinTableHDU
    hdu = fits.BinTableHDU(table)
    assert hdu.header["TTYPE2"] == "b"
    assert hdu.header["TFORM2"] == "K"
    assert hdu.header["TUNIT2"] == "m"

    # Check BinTableHDU -> Table
    table2 = Table.read(hdu)
    assert isinstance(table2.meta, dict)
    assert table2.meta == {"VERSION": 42}
    assert table2["b"].unit == "m"
Example #2
    def to_hdulist(self, format="ogip", hdu_bands=None, hdu_region=None):
        """Convert geom to hdulist

        Parameters
        ----------
        format : {"gadf", "ogip", "ogip-sherpa"}
            HDU format
        hdu : str
            Name of the HDU with the map data.

        Returns
        -------
        hdulist : `~astropy.io.fits.HDUList`
            HDU list

        """
        if hdu_bands is None:
            hdu_bands = "HDU_BANDS"
        if hdu_region is None:
            hdu_region = "HDU_REGION"
        if format != "gadf":
            hdu_region = "REGION"

        hdulist = fits.HDUList()

        hdulist.append(
            self.axes.to_table_hdu(hdu_bands=hdu_bands, format=format))

        # region HDU
        if self.region:
            region_table = self._to_region_table()

            region_hdu = fits.BinTableHDU(region_table, name=hdu_region)
            hdulist.append(region_hdu)

        return hdulist
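A short usage sketch (here `geom` stands for an instance of the class this method belongs to):

hdulist = geom.to_hdulist(format="ogip")
hdulist.writeto("region_geom.fits", overwrite=True)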
Example #3
    def to_hdulist(self, format="ogip"):
        """Convert to `~astropy.io.fits.HDUList`.

        Parameters
        ----------
        format : {"ogip", "ogip-sherpa", "ogip-arf", "ogip-arf-sherpa"}
            Format specification

        Returns
        -------
        hdulist : `~astropy.io.fits.HDUList`
            HDU list
        """
        hdulist = fits.HDUList()

        # add data hdu
        table = self.to_table(format=format)
        hdulist.append(fits.BinTableHDU(table))

        if format in ["ogip", "ogip-sherpa"]:
            hdulist_geom = self.geom.to_hdulist(format=format)[1:]
            hdulist.extend(hdulist_geom)

        return hdulist
Example #4
from sys import argv

import numpy as np
from astropy.io import fits
from astropy.table import Table

hpb_type = np.dtype([('grouped', '=i4'), ('ungrouped', '=i4'),
                     ('potential', '=f4')])

bPotential = True

NSIDE = 64
N = 12 * NSIDE**2

hdr = fits.Header()
hdr['EXTEND'] = True  # EXTEND is a FITS logical, not the string 'T'
primary_hdu = fits.PrimaryHDU(header=hdr)
primary_hdu.writeto('test.fits')

# Use BinTableHDU as a template
hdr = fits.BinTableHDU(Table(names=['SIGNAL'],
                             dtype=['=f4' if bPotential else '=i4']),
                       name='BINTABLE').header
hdr['ORDERING'] = ("RING", "Pixel ordering scheme, either RING or NESTED")
hdr['INDXSCHM'] = ("IMPLICIT", "Pixel indexing scheme (IMPLICIT or EXPLICIT)")
hdr['NSIDE'] = (NSIDE, "Resolution parameter for HEALPIX")
hdr['COORDSYS'] = ("C", "Pixelisation coordinate system")
hdr['PIXTYPE'] = ("HEALPIX", "HEALPIX Pixelisation")
hdr['NAXIS'] = 2
hdr['NAXIS2'] = N
hdr['NAXIS1'] = 1
hdr['BITPIX'] = -32 if bPotential else 32

hdu = fits.StreamingHDU('test.fits', hdr)
for file in argv[1:]:
    with open(file, 'rb') as hpb:
        data = np.fromfile(hpb, dtype=hpb_type)
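        # NOTE: the original snippet is truncated here. A plausible
        # continuation (an assumption, not the original code) streams the
        # selected column of each file's records into the FITS file:
        column = 'potential' if bPotential else 'grouped'
        hdu.write(data[column])
hdu.close()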
Example #5
 def to_fits(self, name="EFFECTIVE AREA"):
     """Convert to `~astropy.io.fits.BinTable`."""
     return fits.BinTableHDU(self.to_table(), name=name)
Example #6
def make_spw_cube(spw='spw{0}',
                  spwnum=0,
                  fntemplate='OrionSourceI',
                  overwrite_existing=False,
                  bmaj_limits=None,
                  fnsuffix="",
                  filesuffix='image.pbcor.fits',
                  first_endchannel='*',
                  cropends=False,
                  minimize=True,
                  debug_mode=False,
                  add_beam_info=True):
    """
    Parameters
    ----------
    spw : str
        String template for the input/output name
    spwnum : int
        The spectral window number
    fntemplate : str
        Filename template (goes into the glob)
    overwrite_existing : bool
        Overwrite data in the output cube?
    bmaj_limits : tuple or None
        If set, blank out planes whose beam major axis is outside
        ``bmaj_limits[0] < BMAJ < bmaj_limits[1]``.
    cropends : bool or int
        Number of pixels to crop off the ends of an image
    minimize : bool
        Compute the spatial minimal subcube before building the cube?  Slices
        for all subsequent cubes will be computed from the first cube.
    add_beam_info : bool
        Append a BinTableHDU with per-channel beam parameters?
    """
    if debug_mode:
        lvl = log.getEffectiveLevel()
        log.setLevel('DEBUG')

    spw = spw.format(spwnum)

    big_filename = '{1}_{0}{2}_lines.fits'.format(spw, fntemplate, fnsuffix)

    header_fn = glob.glob(
        'OrionSourceI.B3.{0}.lines0-{4}.clarkclean1000.{3}'.format(
            spw, fntemplate, fnsuffix, filesuffix, first_endchannel))
    if len(header_fn) != 1:
        raise ValueError(
            "Found too many or too few matches: {0}".format(header_fn))
    else:
        header_fn = header_fn[0]

    # First set up an empty file
    if not os.path.exists(big_filename):
        log.info("Creating large cube based on header {0}".format(header_fn))

        if minimize:
            cube0 = SpectralCube.read(header_fn)
            slices = cube0.subcube_slices_from_mask(cube0.mask,
                                                    spatial_only=True)
            # use the calculated 3rd dimension, plus the difference of the
            # x and y slices
            #header['NAXIS2'] = slices[1].stop-slices[1].start
            #header['NAXIS1'] = slices[2].stop-slices[2].start
            header = cube0[slices].header
        else:
            header = fits.getheader(header_fn)

        # Make an arbitrary, small data array before prepping the header
        data = np.zeros((100, 100), dtype=np.float32)
        hdu = fits.PrimaryHDU(data=data, header=header)
        cdelt_sign = np.sign(hdu.header['CDELT3'])
        # Set the appropriate output size (this can be extracted from the LISTOBS)
        naxis3_in = header['NAXIS3']
        header['NAXIS3'] = nchans_total[spwnum]
        header_wcs = wcs.WCS(fits.getheader(header_fn))
        header_specwcs = header_wcs.sub([wcs.WCSSUB_SPECTRAL])
        if cdelt_sign == -1:
            ind0, ind1 = getinds(header_fn)
            #5/20/2017: redoing some of this, and the text below is frightening but no longer relevant
            # a +1 was on the next line before an edit on 4/10/2017
            # it may have been rendered irrelevant when I included +1
            # channel in each cube?  Not clear - the arithmetic no longer
            # makes sense but is empirically necessary.
            assert ind0 == 0

            # these reindex the cube so that it has an increasing cdelt.
            header['CRPIX3'] = 1  #nchans_total[spwnum]
            header['CRVAL3'] = header_specwcs.wcs_pix2world(
                [nchans_total[spwnum]], 1)[0][0]
            header['CDELT3'] = np.abs(header_specwcs.wcs.cdelt[0])

            # ensure that the new CRVAL evaluated at its own position matches
            # the CRVAL3.  This should be impossible to fail unless WCS itself
            # fails
            newheaderspecwcs = wcs.WCS(header).sub([wcs.WCSSUB_SPECTRAL])
            crval3 = newheaderspecwcs.wcs_pix2world([header['CRPIX3']],
                                                    1)[0][0]
            np.testing.assert_array_almost_equal_nulp(crval3, header['CRVAL3'])

        shape = (header['NAXIS3'], header['NAXIS2'], header['NAXIS1'])

        # Write to disk
        header.tofile(big_filename)
        # Using the 'append' io method, update the *header*
        with open(big_filename, 'rb+') as fobj:
            # Seek past the length of the header, plus the length of the
            # data we want to write.
            # The -1 is to account for the final byte that we are about to
            # write:
            # 'seek' works on bytes, so divide #bits / (bytes/bit)
            fobj.seek(
                len(header.tostring()) + (shape[0] * shape[1] * shape[2] *
                                          int(np.abs(header['BITPIX']) / 8)) -
                1)
            fobj.write(b'\0')

        big_cube = SpectralCube.read(big_filename)
        header_cube = SpectralCube.read(header_fn)
        # in both cases, SpectralCube sorts the extrema
        if cdelt_sign == 1:
            np.testing.assert_array_almost_equal_nulp(
                big_cube.spectral_extrema[0].value,
                header_cube.spectral_extrema[0].value)
            np.testing.assert_array_almost_equal_nulp(
                big_cube.wcs.wcs.cdelt, header_cube.wcs.wcs.cdelt)
        elif cdelt_sign == -1:
            np.testing.assert_array_almost_equal_nulp(
                big_cube.spectral_extrema[1].value,
                header_cube.spectral_extrema[1].value)
            np.testing.assert_array_almost_equal_nulp(
                big_cube.wcs.wcs.cdelt[-1] * -1, header_cube.wcs.wcs.cdelt[-1])

        log.info("Cube creation completed.  Now moving on to populating it.")

    # Find the appropriate files (this is NOT a good way to do this!  Better to
    # provide a list.  But wildcards are quick & easy...)
    fileglob = "OrionSourceI.B3.{0}.lines*{3}".format(spw, fntemplate,
                                                      fnsuffix, filesuffix)
    files = glob.glob(fileglob)
    log.info("Files to be merged with glob {0}: ".format(fileglob))
    log.info(str(files))

    # open the file in update mode (it should have the right dims now)
    hdul = fits.open(big_filename, mode='update')
    main_wcs = wcs.WCS(hdul[0].header).sub([wcs.WCSSUB_SPECTRAL])

    if add_beam_info:
        shape = hdul[0].data.shape[0]
        if len(hdul) > 1 and isinstance(hdul[1], fits.BinTableHDU):
            pass
        else:
            hdul.append(
                fits.BinTableHDU(
                    np.recarray(shape,
                                names=['BMAJ', 'BMIN', 'BPA', 'CHAN', 'POL'],
                                formats=['f4', 'f4', 'f4', 'i4', 'i4'])))

    # sorted so that we deal with zero first, since it has potential to be a problem.
    for fn in ProgressBar(sorted(files)):
        log.info("inds={0} fn={1}".format(getinds(fn), fn))
        ind0, ind1 = getinds(fn)

        # this is not correct...?
        # or maybe it only applies if cropends is set....
        # if ind0 == 0:
        #     ind1 = ind1 + 1

        cdelt = fits.getheader(fn)['CDELT3']
        if 'cdelt_sign' not in locals():
            cdelt_sign = np.sign(cdelt)
            log.warn("cdelt_sign was not defined: overwriting a"
                     " previously-existing file.  "
                     "This may not be what you want; the data could be going "
                     "opposite the parent cube.  Check that the original "
                     "header is OK. sign(CDELT) is now {0}, "
                     "while for the big header it is {1}".format(
                         cdelt_sign,
                         np.sign(fits.getheader(big_filename)['CDELT3'])))

        if cropends:
            # don't crop 1st or last pixel in full cube
            if ind0 > 0:
                log.debug("ind0 going from {0} to {1}".format(
                    ind0, ind0 + cropends))
                ind0 = ind0 + cropends
                if cdelt_sign == 1:
                    dataind0 = cropends
                    log.debug("dataind0 going to {0}".format(cropends))
                else:
                    dataind1 = -cropends
                    log.debug("dataind1 going to {0}".format(-cropends))
            else:
                if cdelt_sign == 1:
                    dataind0 = 0
                    log.debug("dataind0 going to {0}".format(0))
                elif cdelt_sign == -1:
                    log.debug("dataind1 going to {0}".format(None))
                    dataind1 = None

            if (ind1 < nchans_total[spwnum] - 1):
                log.debug("ind1 going from {0} to {1}".format(
                    ind1, ind1 - cropends))
                ind1 = ind1 - cropends
                if cdelt_sign == 1:
                    dataind1 = -cropends
                    log.debug("dataind1 going to {0}".format(-cropends))
                elif cdelt_sign == -1:
                    dataind0 = cropends
                    log.debug("dataind0 going to {0}".format(cropends))
            else:
                if cdelt_sign == 1:
                    dataind1 = None
                else:
                    log.debug("dataind0 going to {0}".format(0))
                    dataind0 = 0
        else:
            dataind0 = 0
            dataind1 = None

        if cdelt_sign == -1:
            log.debug("Reversing indices from {0} {1} to ".format(ind0, ind1))
            ind1, ind0 = (nchans_total[spwnum] - ind0,
                          nchans_total[spwnum] - ind1)
            log.debug("{0} {1}".format(ind0, ind1))
            if ind0 < 0:
                ind0 = 0

        log.info("inds have been remapped to {0}, {1}".format(ind0, ind1))

        plane = hdul[0].data[ind0]
        if np.all(plane == 0) or overwrite_existing:
            log.info("Replacing indices {0}->{2} {1}".format(
                getinds(fn), fn, (ind0, ind1)))

            data = fits.getdata(fn)
            dwcs = wcs.WCS(fits.getheader(fn)).sub([wcs.WCSSUB_SPECTRAL])

            dataind1 = data.shape[0] + (dataind1 or 0)

            # handle the case where I made the indices NOT match the cube...
            # this is really stupid and should be removed because I should have
            # made the input cubes correct.  Oh well.
            if np.abs(ind1 - ind0) < np.abs(dataind1 - dataind0):
                dataind1 = dataind0 + np.abs(ind1 - ind0)

            if cdelt_sign == -1:
                dataind0, dataind1 = dataind1, dataind0
                dwcs0 = dwcs.wcs_pix2world([dataind0 - 1], 0)[0][0]
                dwcs1 = dwcs.wcs_pix2world([dataind1], 0)[0][0]
            else:
                dwcs0 = dwcs.wcs_pix2world([dataind0], 0)[0][0]
                dwcs1 = dwcs.wcs_pix2world([dataind1 - 1], 0)[0][0]
            hwcs0 = main_wcs.wcs_pix2world([ind0], 0)[0][0]
            hwcs1 = main_wcs.wcs_pix2world([ind1 - 1], 0)[0][0]

            if not np.isclose(hwcs0, dwcs0, atol=0.5 * np.abs(cdelt), rtol=0):
                log.error(
                    "current data, big cube indices: {0},{1} and {2},{3}".
                    format(dataind0, dataind1, ind0, ind1))
                raise ValueError(
                    "World coordinates of first pixels do not match: {0} - {1} = {2} ({3} cdelt)"
                    .format(dwcs0, hwcs0, dwcs0 - hwcs0,
                            (dwcs0 - hwcs0) / cdelt))
            if not np.isclose(hwcs1, dwcs1, atol=0.5 * np.abs(cdelt), rtol=0):
                log.error(
                    "current data, big cube indices: {0},{1} and {2},{3}".
                    format(dataind0, dataind1, ind0, ind1))
                raise ValueError(
                    "World coordinates of last pixels do not match: {0} - {1} = {2} ({3} cdelt)"
                    .format(dwcs1, hwcs1, dwcs1 - hwcs1,
                            (dwcs1 - hwcs1) / cdelt))

            if 'slices' not in locals():
                if minimize:
                    log.info("Determining slices")
                    cube0 = SpectralCube.read(header_fn)
                    slices = cube0.subcube_slices_from_mask(cube0.mask,
                                                            spatial_only=True)
                    log.info("Slices are {0}".format(slices))
                else:
                    slices = (slice(None), ) * 3

            if bmaj_limits is not None:
                log.info("Identifying acceptable beams")
                beamtable = fits.open(fn)[1]
                ok_beam = ((beamtable.data['BMAJ'] > bmaj_limits[0]) &
                           (beamtable.data['BMAJ'] < bmaj_limits[1]))
                data[~ok_beam] = np.nan
                log.info("Found {0} bad beams of {1}".format((~ok_beam).sum(),
                                                             ok_beam.size))

            if cdelt_sign == -1:
                if dataind1 == 0:
                    dataslice = slice(dataind0 - 1, None, -1)
                elif dataind1 >= 1:
                    dataslice = slice(dataind0 - 1, dataind1 - 1, -1)
                else:
                    raise ValueError("Something is wrong with dataind0")
            else:
                dataslice = slice(dataind0, dataind1, 1)
            log.info("Dataslice is {0}".format(dataslice))

            assert hdul[0].data[ind0:ind1].shape == data[dataslice, slices[1],
                                                         slices[2]].shape

            if not debug_mode:
                if add_beam_info:
                    log.info("Adding beam information")
                    beamtable = fits.open(fn)[1]
                    hdul[1].data[ind0:ind1] = beamtable.data[dataslice]

                log.info("Inserting data")
                hdul[0].data[ind0:ind1, :, :] = data[dataslice, slices[1],
                                                     slices[2]]
                log.info("Flushing")
                hdul.flush()
                log.info("Done with iteration for {0}".format(fn))

    if debug_mode:
        log.setLevel(lvl)
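A call for a single spectral window might look like the following (a sketch; it assumes the surrounding script defines `nchans_total` and `getinds`, and uses the `OrionSourceI` file naming seen in the globs above):

make_spw_cube(spw='spw{0}', spwnum=3, fntemplate='OrionSourceI',
              cropends=1, minimize=True, add_beam_info=True)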
Example #7
hdu_table.meta.pop("BASE_DIR")

# Add the rows for the background HDUs
hdu_table = vstack([hdu_table, hdu_table_bkg])
hdu_table.sort("OBS_ID")


hdu_table[:7]  # preview the first rows (notebook display)


# Put index tables and background models in a FITS file
hdu_list = fits.HDUList()

hdu = fits.BinTableHDU(hdu_table)
hdu.name = "HDU_INDEX"
hdu_list.append(hdu)

hdu = fits.BinTableHDU(data_store.obs_table)
hdu_list.append(hdu)

for idx, model in enumerate(models):
    hdu = model.to_fits()
    hdu.name = "BKG{}".format(idx)
    hdu_list.append(hdu)

print([_.name for _ in hdu_list])
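The notebook presumably writes the index file to disk next; a minimal sketch of that step (the output filename is an assumption):

hdu_list.writeto("hdu-index.fits.gz", overwrite=True)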

import os
Example #8
 def to_fits(self, name="BACKGROUND"):
     """Convert to `~astropy.io.fits.BinTableHDU`."""
     return fits.BinTableHDU(self.to_table(), name=name)
Example #9
def _write_filename(filename, c_hdr, s_hdr, cov_index_map, sparse_map,
                    compress=False, compress_tilesize=None):
    """
    Write to a filename, using fitsio or astropy.io.fits.

    This assumes that you want to overwrite any existing file (as
    should be checked in calling function).

    Parameters
    ----------
    filename : `str`
       Name of file to write to.
    c_hdr : `fitsio.FITSHDR` or `astropy.io.fits.Header`
       Coverage index map header
    s_hdr : `fitsio.FITSHDR` or `astropy.io.fits.Header`
       Sparse map header
    cov_index_map : `np.ndarray`
       Coverage index map
    sparse_map : `np.ndarray`
       Sparse map
    compress : `bool`, optional
       Write with FITS compression?
    compress_tilesize : `int`, optional
       Tile size to use when writing with compression.
    """
    # Currently, all writing is done with astropy.io.fits because it supports
    # lossless compression of floating point data.  Unfortunately, the header
    # is wrong so we have a header patch below.

    c_hdr['EXTNAME'] = 'COV'
    s_hdr['EXTNAME'] = 'SPARSE'

    hdu_list = fits.HDUList()

    hdu = fits.PrimaryHDU(data=cov_index_map, header=fits.Header())
    for n in c_hdr:
        if n not in FITS_RESERVED:
            hdu.header[n] = c_hdr[n]
    hdu_list.append(hdu)

    if compress:
        hdu = fits.CompImageHDU(data=sparse_map, header=fits.Header(),
                                compression_type='GZIP_2',
                                tile_size=(compress_tilesize, ),
                                quantize_level=0.0)
    else:
        if sparse_map.dtype.fields is not None:
            hdu = fits.BinTableHDU(data=sparse_map, header=fits.Header())
        else:
            hdu = fits.ImageHDU(data=sparse_map, header=fits.Header())

    for n in s_hdr:
        if n not in FITS_RESERVED:
            hdu.header[n] = s_hdr[n]
    hdu_list.append(hdu)

    hdu_list.writeto(filename, overwrite=True)

    # When writing a gzip unquantized (lossless) floating point image,
    # current versions of astropy (4.0.1 and earlier, at least) write
    # the ZQUANTIZ header value as NO_DITHER, while cfitsio expects
    # this to be NONE for unquantized data.  The only way to overwrite
    # this reserved header keyword is to manually overwrite the bytes
    # in the file.  The following code uses mmap to overwrite the
    # necessary header keyword without loading the full image into
    # memory.  Note that healsparse files only have one compressed
    # extension, so there will only be one use of ZQUANTIZ in the file.
    if compress and not is_integer_value(sparse_map[0]):
        with open(filename, "r+b") as f:
            try:
                mm = mmap.mmap(f.fileno(), 0)
                loc = mm.find(b"ZQUANTIZ= 'NO_DITHER'")
                if loc >= 0:
                    mm.seek(loc)
                    mm.write(b"ZQUANTIZ= 'NONE     '")
            except OSError:
                # Some systems do not have the mmap available,
                # we need to read in the full file.
                data = f.read()
                loc = data.find(b"ZQUANTIZ= 'NO_DITHER'")
                if loc >= 0:
                    f.seek(loc)
                    f.write(b"ZQUANTIZ= 'NONE     '")
Example #10
def save_results(wd, mode, header, neon_header, superbias, flat_coeff, WL_map,
                 Y_correction, reduct_star, star_wl, sensitivity, obj, err,
                 neon, dots, obj_bin, err_bin, sep, medium_results, sky):
    fits.PrimaryHDU(sky.astype('float32')).writeto(wd + 'sky.fits',
                                                   overwrite=True)
    fits.PrimaryHDU(medium_results['initial'].astype('float32')).writeto(
        wd + 'm_initial.fits', overwrite=True)

    if mode['bias'] == 'Yes':
        fits.PrimaryHDU(superbias.astype('float32'),
                        header=header).writeto(wd + 'sbias.fits',
                                               overwrite=True)
    if mode['bias'] != 'No':
        fits.PrimaryHDU(medium_results['bias'].astype('float32')).writeto(
            wd + 'm_bias.fits', overwrite=True)

    if mode['flat'] == 'Yes':
        hdu = fits.PrimaryHDU(flat_coeff.astype('float32'))
        hdu.writeto(wd + 'flatcoef.fits', overwrite=True)
    if mode['flat'] != 'No':
        fits.PrimaryHDU(medium_results['flat'].astype('float32')).writeto(
            wd + 'm_flat.fits', overwrite=True)

    if mode['x_lin'] == 'Yes':
        fits.PrimaryHDU(WL_map.astype('float32')).writeto(wd + 'WL_map.fits',
                                                          overwrite=True)
        fits.PrimaryHDU(neon.astype('float32'),
                        header=neon_header).writeto(wd + 'neon.fits',
                                                    overwrite=True)
    if mode['y_lin'] == 'Yes':
        fits.PrimaryHDU(Y_correction.astype('float32')).writeto(
            wd + 'Y_correction.fits', overwrite=True)
        fits.PrimaryHDU(dots.astype('float32')).writeto(wd + 'dots.fits',
                                                        overwrite=True)

    if mode['x_lin'] != 'No' or mode['y_lin'] != 'No' or mode['WL'] != 'No':
        fits.PrimaryHDU(
            medium_results['linerization'].astype('float32')).writeto(
                wd + 'm_linerization.fits', overwrite=True)

    if mode['star'] == 'Yes':
        star_image = fits.PrimaryHDU(reduct_star.astype('float32'))
        star_k = fits.BinTableHDU.from_columns([
            fits.Column(name='wavelength', format='E', array=star_wl),
            fits.Column(name='sensitivity', format='E', array=sensitivity)
        ])
        fits.HDUList(hdus=[star_image, star_k]).writeto(wd + 'star.fits',
                                                        overwrite=True)
    if mode['star'] != 'No':
        fits.PrimaryHDU(medium_results['star'].astype('float32')).writeto(
            wd + 'm_star.fits', overwrite=True)

    if mode['cosmics'] != 'No':
        fits.PrimaryHDU(medium_results['cosmics'].astype('float32')).writeto(
            wd + 'm_cosmics.fits', overwrite=True)

    fits.PrimaryHDU(obj.astype('float32'),
                    header=header).writeto(wd + 'result.fits', overwrite=True)
    fits.PrimaryHDU(err.astype('float32'),
                    header=header).writeto(wd + 'errors.fits', overwrite=True)
    SNR = obj / err
    fits.PrimaryHDU(SNR.astype('float32')).writeto(wd + 'SNR.fits',
                                                   overwrite=True)

    if mode['bin'] == 'Yes':
        image = fits.PrimaryHDU(obj_bin.astype('float32'), header=header)
        table = fits.BinTableHDU(data=Table([sep]))
        fits.HDUList(hdus=[image, table]).writeto(wd + 'H_bins.fits',
                                                  overwrite=True)
        image = fits.PrimaryHDU(err_bin.astype('float32'), header=header)
        fits.HDUList(hdus=[image, table]).writeto(wd + 'H_err_bins.fits',
                                                  overwrite=True)
        image = fits.PrimaryHDU((obj_bin / err_bin).astype('float32'),
                                header=header)
        fits.HDUList(hdus=[image, table]).writeto(wd + 'H_SNR_bins.fits',
                                                  overwrite=True)
Example #11
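Only the tail of this example's first helper survived the scrape (a bare `return d, h2`); judging from the call `transpose(d, h, [1, 2, 3, 0])` in the main block below, it transposes a data array together with the matching FITS header axes. A hedged reconstruction under that assumption, not the original code:

def transpose(d, h, axes):
    # Hedged reconstruction: transpose the data and permute the matching
    # FITS header keywords (numpy axis i maps to FITS axis NAXISn with
    # n = d.ndim - i).
    h2 = h.copy()
    naxis = d.ndim
    for new_ax, old_ax in enumerate(axes):
        for key in ('NAXIS', 'CTYPE', 'CRVAL', 'CRPIX', 'CDELT', 'CUNIT'):
            src = '{0}{1}'.format(key, naxis - old_ax)
            if src in h:
                h2['{0}{1}'.format(key, naxis - new_ax)] = h[src]
    d = d.transpose(axes)
    return d, h2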


if __name__ == "__main__":
    np.random.seed(42)

    beams = np.recarray(4, dtype=[('BMAJ', '>f4'), ('BMIN', '>f4'),
                                  ('BPA', '>f4'), ('CHAN', '>i4'),
                                  ('POL', '>i4')])
    beams['BMAJ'] = [0.4,0.3,0.3,0.4] # arcseconds
    beams['BMIN'] = [0.1,0.2,0.2,0.1]
    beams['BPA'] = [0,45,60,30] # degrees
    beams['CHAN'] = [0,1,2,3]
    beams['POL'] = [0,0,0,0]
    beams = fits.BinTableHDU(beams)

    # Single Stokes
    h = fits.header.Header.fromtextfile(HEADER_FILENAME)
    h['BUNIT'] = 'K' # Kelvins are a valid unit, JY/BEAM are not: they should be tested separately
    h['NAXIS1'] = 2
    h['NAXIS2'] = 3
    h['NAXIS3'] = 4
    h['NAXIS4'] = 1
    d = np.random.random((1, 2, 3, 4))

    fits.writeto('advs.fits', d, h, overwrite=True)

    d, h = transpose(d, h, [1, 2, 3, 0])
    fits.writeto('dvsa.fits', d, h, overwrite=True)
Example #12
def tpf2lc(fname, radii, aper_shape='round', outlier_sigma=5,
        flat_window=301, corr_window=51, cutoff_limit=1.0, polyorder=4,
        break_tolerance=5,save_as_tpf=False, verbose=False, outdir='reduced'):
    '''
    Do aperture photometry with multiple apertures and a mask.
    The light curve with a given aperture is appended with the original data and saved
    as separate fits. The best light curve/aperture (determined with cdpp) is saved in index=1. 
    '''

    print('\nAperture photometry with r={} and {} mask...\n'.format(radii,aper_shape))
    if verbose:
        print('sigma cut for outliers: {}\nwindow length (flatten): {}\n'
              'window length (sff): {}\ncutoff limit (if mask=irregular): {}\n'
              .format(outlier_sigma, flat_window, corr_window, cutoff_limit))
    hdr = fits.getheader(fname)
    hdulist = fits.open(fname)
    tpf = KeplerTargetPixelFile(fname, quality_bitmask='hardest')
    epic = str(tpf.keplerid)
    # build the output path once the EPIC ID is known
    fname_new = os.path.join(outdir, epic + '_' + aper_shape + '.fits')

    if epic not in hdulist.filename():
        raise ValueError('Kepler ID in header doesn\'t match the filename')

    flux_per_r = {}
    cdpps = {}
    for r in radii:
        mask = make_mask(tpf.flux, rad=r, shape=aper_shape, epic=epic)
        lc = tpf.to_lightcurve(aperture_mask=mask)
        lc2 = lc.remove_nans().remove_outliers(sigma=outlier_sigma)
        flat_lc2, trend = lc2.flatten(window_length=flat_window,
                                    polyorder=polyorder,
                                    break_tolerance=break_tolerance,
                                    return_trend=True)
        corr_lc = flat_lc2.correct(method='sff',windows=corr_window)

        flux_per_r[r]=(corr_lc.time,corr_lc.flux,corr_lc.flux_err)
        cdpps[r] = corr_lc.cdpp()

    ###TO DO: add lc generated with irregular mask
    mask = make_mask(tpf.flux, rad=r, shape='irregular', epic=epic)
    lc = tpf.to_lightcurve(aperture_mask=mask)
    lc2 = lc.remove_nans().remove_outliers(sigma=outlier_sigma)
    flat_lc2, trend = lc2.flatten(window_length=flat_window,
                                polyorder=polyorder,
                                break_tolerance=break_tolerance,
                                return_trend=True)
    corr_lc = flat_lc2.correct(method='sff',windows=corr_window)
    flux_per_r['irreg']=(corr_lc.time,corr_lc.flux,corr_lc.flux_err)
    cdpps['irreg'] = corr_lc.cdpp()

    if save_as_tpf:
        cdpp_list=[]
        #append to hdulist photometry of each aperture and save
        for num,r in enumerate(flux_per_r):
            comment_num = 'COMMENT{}'.format(num)
            aper_name = '{}_APER{}'.format(aper_shape,num)
            hdr['ap_rad'] = r
            hdr['ap_shape'] = aper_shape
            hdr['cdpp'] = cdpps[r]
            cdpp_list.append(cdpps[r])

            tab = table.Table(flux_per_r[r], names=['time','flux','flux_err'])
            bintab=fits.BinTableHDU(tab,name=aper_name,header=hdr)
            #append bin table to original hdulist
            hdulist.append(bintab)

        #find smallest cdpp
        best_r=min(cdpps.items(), key=operator.itemgetter(1))[0]

        #re-create bin table
        #tab = table.Table(flux_per_r[best_r], names=['time','flux','flux_err'])
        #bintab=fits.BinTableHDU(tab,name=aper_name,header=hdr)
        #move to index 3 (0: primary; 1: table; 2: aperture)
        #hdulist.insert(3, bintab)

        #alternatively, move hdu into last index of hdulist
        hdulist += [hdulist.pop(3+np.argmin(cdpp_list))]

        #make hdu for best mask
        if best_r == 'irreg':
            mask = make_mask(tpf.flux, rad=r, shape='irregular', epic=epic)
        else:
            mask = make_mask(tpf.flux, rad=best_r, shape=aper_shape, epic=epic)
        hdu=fits.hdu.ImageHDU(np.array(mask,dtype=float), name='APERTURE', header=hdr) #problem with bool
        #replace aperture
        hdulist[2] = hdu

        #save fits
        #fname_new = os.path.join(outdir,fname.split('/')[-1].split('-')[0][4:]+'_'+aper_shape+'.fits')
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        hdulist.writeto(fname_new)
        print('Saved: {}\n'.format(fname_new))

    return flux_per_r, mask
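A usage sketch (the K2 target-pixel filename is hypothetical):

flux_per_r, mask = tpf2lc('ktwo211916756-c05_lpd-targ.fits',
                          radii=[2, 3, 4], aper_shape='round',
                          save_as_tpf=True, verbose=True)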


def read_tpf(fname,index,return_hdr=True):
    '''
    fname: str, filename
    index: int, hdulist index
            [0,1,2] = primary, target table, aperture mask
            [3,...] = photometry using specified aperture
    return_hdr: bool
    '''
    hdulist = fits.open(fname)
    if index in (0, 1, 2):  # primary, target table, aperture mask
        data = hdulist[index].data
        hdr = hdulist[index].header
        if return_hdr:
            return data, hdr
        else:
            return data
    else:
        df=table.Table(hdulist[index].data).to_pandas()
        hdr = hdulist[index].header
        if return_hdr:
            return df, hdr
        else:
            return df

def plot_lc(fname,index,verbose=True,show_all_lc=False,show_mask=True,sigma=None):
    hdulist = fits.open(fname)
    hdulen = len(hdulist)
    tpf = KeplerTargetPixelFile(fname, quality_bitmask='hardest')
    if verbose:
        print(hdulist.info())

    if index == 0: #primary
        data, hdr = read_tpf(fname,index,return_hdr=True)
        print('Plot of primary hdu currently unsupported. Try index=2\n')
        sys.exit()

    elif index == 1: #target tables
        data, hdr = read_tpf(fname,index,return_hdr=True)
        print('Plot of target table currently unsupported. Try index=2\n')
        sys.exit()

    nrows=hdulen-3 #remove first 3 indices
    fig, ax = pl.subplots(nrows=nrows,ncols=1,figsize=(10,10),sharex=True,squeeze=True)
    if index<hdulen and index>2:
        #read flux per r
        epic = str(tpf.keplerid)
        if show_all_lc:
            n=0
            for idx in np.arange(3,hdulen,1):
                df, hdr = read_tpf(fname,idx,return_hdr=True)
                t = df['time']
                f = df['flux']
                ferr = df['flux_err']
                rad = hdr['ap_rad']
                shape = hdr['ap_shape']

                if sigma is not None:
                    f_c=sigma_clip(f, sigma=sigma)
                    t = t[~f_c.mask]
                    f = f[~f_c.mask]
                    ferr = ferr[~f_c.mask]
                    print('removed {} outliers.\n'.format(np.sum(f_c.mask)))
                ax[n].errorbar(t,f,yerr=ferr,marker='o',label='r={}'.format(rad))
                ax[n].legend()
                n+=1
            pl.title('EPIC-'+epic)

        else:
            df, hdr = read_tpf(fname,index,return_hdr=True)
            t = df['time']
            f = df['flux']
            ferr = df['flux_err']
            rad = hdr['ap_rad']
            shape = hdr['ap_shape']
            ax.errorbar(t,f,yerr=ferr,marker='o',label='r={}'.format(rad))
            ax.set_title(epic)
            pl.legend()

        if show_mask:
            mask = hdulist[2].data
            #fluxes = tpf.flux
            #plot_aper_mask(fluxes,rad,aper_shape=shape,contrast=0.1,epic=tpf.keplerid)
            ax = tpf.plot(aperture_mask=mask, mask_color='w', #frame=300,
                scale='linear', cmap='viridis', show_colorbar=True)
            pl.show()
    elif index >= hdulen:
        print('hdulist only has indices up to {0}. Exiting!\n'.format(hdulen-1))
        sys.exit()

    else:
        print('Incorrect index. Set verbose=True.\n')

    return fig
#---------------------------STATS---------------------------#


def noise_statistic(t, f, timescale=0.25, verbose=False):
    '''
    cf. lightkurve.cdpp()
    '''
    nchunks = int((t[-1]-t[0])/timescale)+1
    idx = [(t > t[0] + n * timescale) & (t < t[0] + (n + 1) * timescale) for n in range(nchunks)]
    chunks = [f[ix] for ix in idx if ix.sum() > 1]

    cdpp = np.std([np.nanmedian(ch) for ch in chunks])

    if verbose:
        print('cdpp = {:.4f}'.format(cdpp))

    return cdpp
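A quick self-contained check on synthetic data (assuming `numpy` is imported as `np`):

t = np.linspace(0, 10, 2000)               # ~10 days of evenly sampled times
f = 1.0 + 5e-4 * np.random.randn(t.size)   # flat light curve with 500 ppm noise
print(noise_statistic(t, f, timescale=0.25, verbose=True))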
Example #13
def precompute_synthetic_grid(output_dirname,
                              ranges,
                              wavelengths,
                              to_resolution,
                              modeled_layers_pack,
                              atomic_linelist,
                              isotopes,
                              solar_abundances,
                              segments=None,
                              number_of_processes=1,
                              code="spectrum",
                              use_molecules=False,
                              steps=False,
                              tmp_dir=None):
    """
    Pre-compute a synthetic grid with some reference ranges (Teff, log(g) and
    MH combinations) and all the steps that iSpec will perform in the
    astrophysical parameter determination process.

    All the non-convolved spectra will be saved in a subdir and a complete
    grid file with the reference points already convolved will be saved in a
    FITS file for fast comparison.

    The output directory can be used by the routines 'model_spectrum' and
    'estimate_initial_ap'.
    """
    code = code.lower()
    if code not in ['spectrum', 'turbospectrum', 'moog', 'synthe', 'sme']:
        raise Exception("Unknown radiative transfer code: %s" % (code))

    reference_list_filename = output_dirname + "/parameters.tsv"
    if to_resolution is not None:
        reference_grid_filename = output_dirname + "/convolved_grid_%i.fits.gz" % to_resolution
    fits_dir = os.path.join(output_dirname, "grid/")
    mkdir_p(fits_dir)
    if steps:
        steps_fits_dir = os.path.join(output_dirname, "steps/")
        mkdir_p(steps_fits_dir)

    import dill  # To allow pickle of lambda functions (e.g., one element in modeled_layers_pack)
    import pickle
    pickled_modeled_layers_pack = pickle.dumps(modeled_layers_pack)

    # For code != "grid", ranges are always in position 7 (for grid it would be in position 8)
    valid_ranges = modeled_layers_pack[7]
    teff_range = valid_ranges['teff']
    logg_range = valid_ranges['logg']
    MH_range = valid_ranges['MH']
    alpha_range = valid_ranges.get(
        'alpha', (-1.5, 1.5)
    )  # Fallback range if 'alpha' is not a free parameter for atmosphere interpolation
    vmic_range = valid_ranges.get(
        'vmic', (0.0, 50.)
    )  # Fallback range if 'vmic' is not a free parameter for atmosphere interpolation

    # Parallelization pool
    if number_of_processes == 1:
        pool = None
    else:
        pool = Pool(number_of_processes)

    # Create grid binary file
    elapsed = 0  # seconds

    num_ref_spec = len(ranges)
    num_spec = num_ref_spec * 9  # Reference + 8 variations in Teff, logg, MH, alpha, vmic, vmac, vsini, limb darkening coeff

    i = 0
    for teff, logg, MH, alpha, vmic in ranges:
        if vmic is None:
            vmic = estimate_vmic(teff, logg, MH)
        vmac = 0.0  # This can be modified after synthesis if needed
        vsini = 0.0  # This can be modified after synthesis if needed
        limb_darkening_coeff = 0.00  # This can be modified after synthesis if needed
        resolution = 0  # This can be modified after synthesis if needed
        is_step = False
        if not valid_atmosphere_target(modeled_layers_pack, {
                'teff': teff,
                'logg': logg,
                'MH': MH,
                'alpha': alpha
        }):
            raise Exception(
                "Target parameters out of the valid ranges: teff={} logg={} MH={} alpha={}"
                .format(teff, logg, MH, alpha))
        points = [
            (teff, logg, MH, alpha, vmic, vmac, vsini, limb_darkening_coeff,
             is_step),
        ]
        if steps:
            is_step = True
            new_teff = teff + Constants.SYNTH_STEP_TEFF if teff + Constants.SYNTH_STEP_TEFF <= teff_range[
                -1] else teff - Constants.SYNTH_STEP_TEFF
            new_logg = logg + Constants.SYNTH_STEP_LOGG if logg + Constants.SYNTH_STEP_LOGG <= logg_range[
                -1] else logg - Constants.SYNTH_STEP_LOGG
            new_MH = MH + Constants.SYNTH_STEP_MH if MH + Constants.SYNTH_STEP_MH <= MH_range[
                -1] else MH - Constants.SYNTH_STEP_MH
            new_alpha = alpha + Constants.SYNTH_STEP_ALPHA if alpha + Constants.SYNTH_STEP_ALPHA <= alpha_range[
                -1] else alpha - Constants.SYNTH_STEP_ALPHA
            new_vmic = vmic + Constants.SYNTH_STEP_VMIC if vmic + Constants.SYNTH_STEP_VMIC <= vmic_range[
                -1] else vmic - Constants.SYNTH_STEP_VMIC
            # For each reference point, calculate also the variations that iSpec will perform in the first iteration
            points += [  # Final unconvolved spectra where vmic/vmac are free and do not follow vmic/vmac empirical relations
                (new_teff, logg, MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, new_logg, MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, new_MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, MH, new_alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, MH, alpha, new_vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
            ]
            points += [
                # Final unconvolved spectra where vmic is not free and does follow vmic empirical relations
                (new_teff, logg, MH, alpha, estimate_vmic(new_teff, logg, MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
                (teff, new_logg, MH, alpha, estimate_vmic(teff, new_logg, MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
                (teff, logg, new_MH, alpha, estimate_vmic(teff, logg, new_MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
            ]

        for j, (teff, logg, MH, alpha, vmic, vmac, vsini, limb_darkening_coeff,
                is_step) in enumerate(points):
            if is_step:
                filename_out = steps_fits_dir + "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                    int(teff), logg, MH, alpha, vmic, vmac, vsini,
                    limb_darkening_coeff) + ".fits.gz"
            else:
                filename_out = fits_dir + "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                    int(teff), logg, MH, alpha, vmic, vmac, vsini,
                    limb_darkening_coeff) + ".fits.gz"

            if os.path.exists(filename_out):
                print("Skipping", teff, logg, MH, alpha, vmic, vmac, vsini,
                      limb_darkening_coeff, "already computed")
                continue

            if pool is None:
                if sys.platform == "win32":
                    # On Windows, the best timer is time.clock()
                    default_timer = time.clock
                else:
                    # On most other platforms the best timer is time.time()
                    default_timer = time.time

                lock = FileLock(filename_out + ".lock")
                try:
                    lock.acquire(timeout=-1)  # Don't wait
                except (LockTimeout, AlreadyLocked) as e:
                    # Some other process is computing this spectrum, do not continue
                    print("Skipping", teff, logg, MH, alpha, vmic, vmac, vsini,
                          limb_darkening_coeff, "already locked")
                    continue

                try:
                    tcheck = default_timer()
                    # Validate parameters
                    __generate_synthetic_fits(filename_out,
                                              wavelengths,
                                              segments,
                                              teff,
                                              logg,
                                              MH,
                                              alpha,
                                              vmic,
                                              vmac,
                                              vsini,
                                              limb_darkening_coeff,
                                              resolution,
                                              pickled_modeled_layers_pack,
                                              atomic_linelist,
                                              isotopes,
                                              solar_abundances,
                                              code=code,
                                              use_molecules=use_molecules,
                                              tmp_dir=tmp_dir,
                                              locked=True)
                    elapsed = default_timer() - tcheck

                    print(
                        "-----------------------------------------------------"
                    )
                    print("Remaining time:")
                    print("\t", (num_spec - i) * elapsed, "seconds")
                    print("\t", (num_spec - i) * (elapsed / 60), "minutes")
                    print("\t", (num_spec - i) * (elapsed / (60 * 60)),
                          "hours")
                    print("\t", (num_spec - i) * (elapsed / (60 * 60 * 24)),
                          "days")
                    print(
                        "-----------------------------------------------------"
                    )
                finally:
                    lock.release()

            else:
                pool.apply_async(__generate_synthetic_fits, [
                    filename_out, wavelengths, segments, teff, logg, MH, alpha,
                    vmic, vmac, vsini, limb_darkening_coeff, resolution,
                    pickled_modeled_layers_pack, atomic_linelist, isotopes,
                    solar_abundances
                ],
                                 kwds={
                                     'code': code,
                                     'use_molecules': use_molecules,
                                     'tmp_dir': tmp_dir,
                                     'locked': False
                                 })
            i += 1

    if pool is not None:
        pool.close()
        pool.join()

    # Create parameters.tsv
    reference_list = Table()
    if len(np.unique(ranges[['logg', 'MH', 'alpha', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_teff', dtype=int))
    else:
        reference_list.add_column(Column(name='teff', dtype=int))
    if len(np.unique(ranges[['teff', 'MH', 'alpha', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_logg', dtype=float))
    else:
        reference_list.add_column(Column(name='logg', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'alpha',
                             'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_MH', dtype=float))
    else:
        reference_list.add_column(Column(name='MH', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'MH', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_alpha', dtype=float))
    else:
        reference_list.add_column(Column(name='alpha', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'MH', 'alpha']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_vmic', dtype=float))
    else:
        reference_list.add_column(Column(name='vmic', dtype=float))
    reference_list.add_column(Column(name='filename', dtype='|S100'))
    for teff, logg, MH, alpha, vmic in ranges:
        # Only use the first spectra generated for each combination
        zero_vmac = 0.0
        zero_vsini = 0.0
        zero_limb_darkening_coeff = 0.00
        reference_filename_out = "./grid/{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
            int(teff), logg, MH, alpha, vmic, zero_vmac, zero_vsini,
            zero_limb_darkening_coeff) + ".fits.gz"
        reference_list.add_row(
            (int(teff), logg, MH, alpha, vmic, reference_filename_out))

    if not os.path.exists(reference_list_filename):
        lock = FileLock(reference_list_filename + ".lock")
        try:
            lock.acquire(timeout=-1)  # Don't wait
        except (LockTimeout, AlreadyLocked) as e:
            # Some other process is writing this file, do not continue
            print("Skipping", reference_list_filename, "already locked")
        else:
            try:
                ascii.write(reference_list,
                            reference_list_filename,
                            delimiter='\t',
                            overwrite=True)
                print("Written", reference_list_filename)
            finally:
                lock.release()

    if to_resolution is not None:
        if not os.path.exists(reference_grid_filename):
            lock = FileLock(reference_grid_filename + ".lock")
            try:
                lock.acquire(timeout=-1)  # Don't wait
            except (LockTimeout, AlreadyLocked) as e:
                # Some other process is computing this spectrum, do not continue
                print("Skipping", reference_grid_filename, "already locked")
            else:
                try:
                    reference_grid = None
                    complete_reference_list = Table()
                    complete_reference_list.add_column(
                        Column(name='teff', dtype=int))
                    complete_reference_list.add_column(
                        Column(name='logg', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='MH', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='alpha', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vmic', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vmac', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vsini', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='limb_darkening_coeff', dtype=float))
                    for teff, logg, MH, alpha, vmic in ranges:
                        # Only use the first spectra generated for each combination
                        zero_vmac = 0.0
                        zero_vsini = 0.0
                        zero_limb_darkening_coeff = 0.00
                        vmac = estimate_vmac(teff, logg, MH)
                        vsini = 1.6  # Sun
                        limb_darkening_coeff = 0.6
                        reference_filename_out = "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                            int(teff), logg, MH, alpha, vmic, zero_vmac,
                            zero_vsini, zero_limb_darkening_coeff) + ".fits.gz"
                        if not os.path.exists(fits_dir +
                                              reference_filename_out):
                            continue
                        complete_reference_list.add_row(
                            (int(teff), logg, MH, alpha, vmic, vmac, vsini,
                             limb_darkening_coeff))

                        # Spectra in the grid is convolved to the specified resolution for fast comparison
                        print("Quick grid:", reference_filename_out)
                        spectrum = read_spectrum(fits_dir +
                                                 reference_filename_out)

                        segments = None
                        vrad = (0, )
                        spectrum['flux'] = apply_post_fundamental_effects(spectrum['waveobs'], spectrum['flux'], segments, \
                                    macroturbulence=vmac, vsini=vsini, \
                                    limb_darkening_coeff=limb_darkening_coeff, R=to_resolution, vrad=vrad)

                        if reference_grid is None:
                            reference_grid = spectrum['flux']
                        else:
                            reference_grid = np.vstack(
                                (reference_grid, spectrum['flux']))

                    if len(ranges) == len(complete_reference_list):
                        # Generate FITS file with grid for fast comparison
                        primary_hdu = fits.PrimaryHDU(reference_grid)
                        wavelengths_hdu = fits.ImageHDU(wavelengths,
                                                        name="WAVELENGTHS")
                        params_bintable_hdu = fits.BinTableHDU(
                            complete_reference_list.as_array(), name="PARAMS")
                        fits_format = fits.HDUList([
                            primary_hdu, wavelengths_hdu, params_bintable_hdu
                        ])
                        fits_format.writeto(reference_grid_filename,
                                            overwrite=True)
                        print("Written", reference_grid_filename)
                finally:
                    lock.release()
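Reading the quick-comparison grid back follows directly from the layout written above (a sketch; the resolution in the filename is only an example, and `fits` / `Table` are imported as in the function):

with fits.open(output_dirname + "/convolved_grid_47000.fits.gz") as hdul:
    flux_grid = hdul[0].data                 # one row of fluxes per grid point
    wavelengths = hdul["WAVELENGTHS"].data
    params = Table(hdul["PARAMS"].data)      # teff, logg, MH, alpha, vmic, ...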
Example #14
    def to_ogip_files(self, outdir=None, use_sherpa=False, overwrite=False):
        """Write OGIP files.

        If you want to use the written files with Sherpa you have to set the
        ``use_sherpa`` flag. Then all files will be written in units 'keV' and
        'cm2'.

        The naming scheme is fixed, with {name} the dataset name:
        
        * PHA file is named pha_obs{name}.fits
        * BKG file is named bkg_obs{name}.fits
        * ARF file is named arf_obs{name}.fits
        * RMF file is named rmf_obs{name}.fits

        Parameters
        ----------
        outdir : `pathlib.Path`
            Output directory. Default is the current working directory.
        use_sherpa : bool, optional
            Write Sherpa-compliant files. Default is False.
        overwrite : bool
            Overwrite existing files?
        """
        # TODO: refactor and reduce amount of code duplication
        outdir = Path.cwd() if outdir is None else make_path(outdir)
        outdir.mkdir(exist_ok=True, parents=True)

        phafile = f"pha_obs{self.name}.fits"

        bkgfile = phafile.replace("pha", "bkg")
        arffile = phafile.replace("pha", "arf")
        rmffile = phafile.replace("pha", "rmf")

        counts_table = self.counts.to_table()
        counts_table["QUALITY"] = np.logical_not(self.mask_safe)
        counts_table["BACKSCAL"] = self.acceptance
        counts_table["AREASCAL"] = np.ones(self.acceptance.size)
        meta = self._ogip_meta()

        meta["respfile"] = rmffile
        meta["backfile"] = bkgfile
        meta["ancrfile"] = arffile
        meta["hduclas2"] = "TOTAL"
        counts_table.meta = meta

        name = counts_table.meta["name"]
        hdu = fits.BinTableHDU(counts_table, name=name)
        hdulist = fits.HDUList(
            [fits.PrimaryHDU(), hdu,
             self._ebounds_hdu(use_sherpa)])

        if self.gti is not None:
            hdu = fits.BinTableHDU(self.gti.table, name="GTI")
            hdulist.append(hdu)

        hdulist.writeto(outdir / phafile, overwrite=overwrite)

        self.aeff.write(outdir / arffile,
                        overwrite=overwrite,
                        use_sherpa=use_sherpa)

        if self.counts_off is not None:
            counts_off_table = self.counts_off.to_table()
            counts_off_table["QUALITY"] = np.logical_not(self.mask_safe)
            counts_off_table["BACKSCAL"] = self.acceptance_off
            counts_off_table["AREASCAL"] = np.ones(self.acceptance.size)
            meta = self._ogip_meta()
            meta["hduclas2"] = "BKG"

            counts_off_table.meta = meta
            name = counts_off_table.meta["name"]
            hdu = fits.BinTableHDU(counts_off_table, name=name)
            hdulist = fits.HDUList(
                [fits.PrimaryHDU(), hdu,
                 self._ebounds_hdu(use_sherpa)])
            hdulist.writeto(outdir / bkgfile, overwrite=overwrite)

        if self.edisp is not None:
            self.edisp.write(outdir / rmffile,
                             overwrite=overwrite,
                             use_sherpa=use_sherpa)
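A usage sketch (`dataset` stands for an instance of the class this method belongs to; for a dataset named "23523" this writes pha_obs23523.fits, bkg_obs23523.fits, arf_obs23523.fits and rmf_obs23523.fits):

dataset.to_ogip_files(outdir="ogip_data", use_sherpa=True, overwrite=True)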
Example #15
def main(name,version,HSTband,scalepc):
    '''Match the nebulae and association catalogues.

    '''

    print(f'parameters: {name} {version} {HSTband} {scalepc}')


    # =====================================================================
    # Read in the data
    # =====================================================================

    #p = {x:sample_table.loc[name][x] for x in sample_table.columns}

    # DAP linemaps (Halpha and OIII)
    filename = data_ext / 'MUSE' / 'DR2.1' / 'copt' / 'MUSEDAP'
    filename = [x for x in filename.iterdir() if x.stem.startswith(name)][0]

    with fits.open(filename) as hdul:
        Halpha = NDData(data=hdul['HA6562_FLUX'].data,
                        uncertainty=StdDevUncertainty(hdul['HA6562_FLUX_ERR'].data),
                        mask=np.isnan(hdul['HA6562_FLUX'].data),
                        meta=hdul['HA6562_FLUX'].header,
                        wcs=WCS(hdul['HA6562_FLUX'].header))
        OIII = NDData(data=hdul['OIII5006_FLUX'].data,
                        uncertainty=StdDevUncertainty(hdul['OIII5006_FLUX_ERR'].data),
                        mask=np.isnan(hdul['OIII5006_FLUX'].data),
                        meta=hdul['OIII5006_FLUX'].header,
                        wcs=WCS(hdul['OIII5006_FLUX'].header))

    # the original catalogue from Francesco
    with fits.open(nebulae_file) as hdul:
        nebulae = Table(hdul[1].data)
    nebulae['SkyCoord'] = SkyCoord(nebulae['cen_ra']*u.deg,nebulae['cen_dec']*u.deg,frame='icrs')

    '''
    with fits.open(basedir/'data'/'interim'/f'Nebulae_Catalogue_v2p1_dig.fits') as hdul:
        dig = Table(hdul[1].data)

    with fits.open(basedir/'data'/'interim'/f'Nebulae_Catalogue_v2p1_fuv.fits') as hdul:
        fuv = Table(hdul[1].data)

    with fits.open(basedir/'data'/'interim'/f'Nebulae_Catalogue_v2p1_eq.fits') as hdul:
        eq_width = Table(hdul[1].data)

    nebulae = join(nebulae,fuv,keys=['gal_name','region_ID'])
    nebulae = join(nebulae,eq_width,keys=['gal_name','region_ID'])
    nebulae = join(nebulae,dig,keys=['gal_name','region_ID'])
    '''

    nebulae.rename_columns(['cen_x','cen_y'],['x','y'])

    with np.errstate(divide='ignore',invalid='ignore'):
        nebulae['[SIII]/[SII]'] = np.nan
        SII = nebulae['SII6716_FLUX_CORR']+nebulae['SII6730_FLUX_CORR']
        SIII = nebulae['SIII6312_FLUX_CORR']+nebulae['SIII9068_FLUX_CORR']
        nebulae['[SIII]/[SII]'][SII>0] = SIII[SII>0]/SII[SII>0]
        #nebulae['HA/FUV'] = nebulae['HA6562_FLUX_CORR']/nebulae['FUV_FLUX_CORR']
        #nebulae['HA/FUV_err'] = nebulae['HA/FUV']*np.sqrt((nebulae['HA6562_FLUX_CORR_ERR']/nebulae['HA6562_FLUX_CORR'])**2+(nebulae['FUV_FLUX_CORR_ERR']/nebulae['FUV_FLUX_CORR'])**2)

    nebulae = nebulae[nebulae['gal_name']==name]
    nebulae.add_index('region_ID')

    filename = data_ext / 'Products' / 'Nebulae_catalogs'/'Nebulae_catalogue_v2' /'spatial_masks'/f'{name}_nebulae_mask_V2.fits'
    with fits.open(filename) as hdul:
        nebulae_mask = NDData(hdul[0].data.astype(float),mask=Halpha.mask,meta=hdul[0].header,wcs=WCS(hdul[0].header))
        nebulae_mask.data[nebulae_mask.data==-1] = np.nan

    #print(f'{name}: {len(nebulae)} HII-regions in final catalogue')

    # the association catalogue and mask
    target  = name.lower()
    associations, associations_mask = read_associations(folder=association_folder,
                                                        target=target,scalepc=scalepc,
                                                        HSTband=HSTband,version=version,data='all')
    if not associations:
        return 0
    
    # environmental masks
    with fits.open(env_masks_folder / f'{name}_simple.fits') as hdul:
        mask = reproject_interp(hdul[0],Halpha.meta,order='nearest-neighbor',return_footprint=False)
        env_masks_neb = NDData(data=mask,
                           meta=hdul[0].header,
                           wcs=Halpha.wcs)
    
    #print(f'{name}: {len(associations)} associations in catalogue')

    # =====================================================================
    # reproject and match catalogues
    # =====================================================================

    nebulae_hst, _  = reproject_interp(nebulae_mask,
                                    output_projection=associations_mask.wcs,
                                    shape_out=associations_mask.data.shape,
                                    order='nearest-neighbor')    

    # we scale the association IDs so that the ID ends up in the decimal part
    scale = 10**np.ceil(np.log10(max(associations_mask.data[~np.isnan(associations_mask.data)])))
    s_arr = associations_mask.data/scale+nebulae_hst
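    # worked illustration of the encoding (values assumed): with scale = 1000,
    # association 42 lying on nebula 7 becomes 7 + 42/1000 = 7.042, and
    # np.modf(7.042) -> (0.042, 7.0), so round(0.042*scale) recovers the
    # association ID while the integer part recovers the nebula ID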

    #print(f'masks reprojected')

    # ids of associations, nebulae and combination (sum) of both
    a_id = np.unique(associations_mask.data[~np.isnan(associations_mask.data)]).astype(int)
    n_id = np.unique(nebulae_mask.data[~np.isnan(nebulae_mask.data)]).astype(int)
    s_id = np.unique(s_arr[~np.isnan(s_arr)])

    # this splits the sum into two parts (nebulae and associations)
    a_modf,n_modf = np.modf(s_id)
    n_modf = n_modf.astype(int)
    a_modf = np.round(a_modf*scale).astype(int)

    unique_a, count_a = np.unique(a_modf,return_counts=True)
    unique_n, count_n = np.unique(n_modf,return_counts=True)

    nebulae_dict = {int(n) : a_modf[n_modf==n].tolist() for n in n_id}     
    associations_dict = {int(a) : n_modf[a_modf==a].tolist() for a in a_id}     


    # so far we ensured that the nebulae in unique_n have only one association,
    # but it is possible that this association extends beyond the nebula and into
    # a second nebula. Those objects are excluded here.
    isolated_nebulae = set()
    isolated_assoc   = set()
    for n,v in nebulae_dict.items():
        if len(v)==1:
            if len(associations_dict[v[0]])==1:
                isolated_nebulae.add(n)
                isolated_assoc.add(v[0])

    #print(f'n_associations = {len(associations_dict)}')
    #print(f'n_nebulae      = {len(nebulae_dict)}')
    #print(f'1to1 match     = {len(isolated_nebulae)}')


    # we save these two dicts so we do not have to redo this every time
    with open(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_nebulae.yml','w+') as f:
        yaml.dump(nebulae_dict,f)
    with open(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_associations.yml','w+') as f:
        yaml.dump(associations_dict,f)


    # find all assoc that have at least one pixel outside of the nebulae masks
    mask = associations_mask.data.copy()
    mask[~np.isnan(nebulae_hst)] = np.nan
    outside = np.unique(mask[~np.isnan(mask)].astype(int))

    # find all assoc that have at least one pixel inside of the nebulae masks
    mask = associations_mask.data.copy()
    mask[np.isnan(nebulae_hst)] = np.nan
    inside = np.unique(mask[~np.isnan(mask)].astype(int))

    contained = np.setdiff1d(inside,outside)
    partial   = np.intersect1d(inside,outside)
    isolated  = np.setdiff1d(outside,inside)

    #print(f'contained: {len(contained)}\npartial: {len(partial)}\nisolated: {len(isolated)}')

    assoc_tmp = associations[['assoc_ID']].copy()
    assoc_tmp.add_index('assoc_ID')

    x_asc,y_asc = associations['SkyCoord'].to_pixel(env_masks_neb.wcs)
    outside = (x_asc >= env_masks_neb.data.shape[1]) | (y_asc >= env_masks_neb.data.shape[0])
    x_asc[outside] = 0
    y_asc[outside] = 0
    assoc_tmp['env_asc'] = [environment_dict[env_masks_neb.data[y,x]] for 
                            x,y in zip(x_asc.astype(int),y_asc.astype(int))]
    assoc_tmp['env_asc'][outside] = ''
    
    assoc_tmp['overlap'] = np.empty(len(associations),dtype='U9')
    assoc_tmp['overlap'][np.isin(assoc_tmp['assoc_ID'],contained)] = 'contained'
    assoc_tmp['overlap'][np.isin(assoc_tmp['assoc_ID'],partial)]   = 'partial'
    assoc_tmp['overlap'][np.isin(assoc_tmp['assoc_ID'],isolated)]  = 'isolated'
    assoc_tmp['1to1'] = False
    assoc_tmp['1to1'][np.isin(assoc_tmp['assoc_ID'],list(isolated_assoc))] = True
    assoc_tmp['Nnebulae'] = [len(associations_dict[k]) for k in assoc_tmp['assoc_ID']]

    assoc_tmp['region_ID'] = np.nan
    assoc_tmp['region_ID'][assoc_tmp['1to1']] = [associations_dict[k][0] for k in assoc_tmp[assoc_tmp['1to1']]['assoc_ID']]

    overlap = join(
        Table(np.unique(associations_mask.data[~np.isnan(associations_mask.data)],return_counts=True),names=['assoc_ID','size']),
        Table(np.unique(associations_mask.data[~np.isnan(nebulae_hst) & ~np.isnan(associations_mask.data)],return_counts=True),names=['assoc_ID','overlap_size']),
        keys=['assoc_ID'],join_type='outer')
    overlap = overlap.filled(0)
    overlap['overlap_asc'] = overlap['overlap_size']/overlap['size']
    overlap['overlap_asc'].info.format = '%.2f'
    assoc_tmp = join(assoc_tmp,overlap[['assoc_ID','overlap_asc']],keys='assoc_ID')

    #print('write to file')
    hdu = fits.BinTableHDU(assoc_tmp,name='joined catalogue')
    hdu.writeto(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_associations.fits',overwrite=True)


    nebulae_tmp = nebulae[['region_ID','x','y']].copy()
    nebulae_tmp.add_index('region_ID')

    nebulae_tmp['env_neb'] = [environment_dict[env_masks_neb.data[y,x]] for 
                              x,y in zip(nebulae_tmp['x'].astype(int),nebulae_tmp['y'].astype(int))]

    nebulae_tmp['neighbors'] = np.nan
    for row in nebulae_tmp:
        row['neighbors'] = len(find_neighbors(nebulae_mask.data,tuple(row[['x','y']]),row['region_ID'],plot=False))
    del nebulae_tmp[['x','y']]

    nebulae_tmp['1to1'] = False
    nebulae_tmp['1to1'][np.isin(nebulae_tmp['region_ID'],list(isolated_nebulae))] = True
    nebulae_tmp['Nassoc'] = [len(nebulae_dict[k]) for k in nebulae_tmp['region_ID']]
    nebulae_tmp['assoc_ID'] = np.nan
    nebulae_tmp['assoc_ID'][nebulae_tmp['1to1']] = [nebulae_dict[k][0] for k in nebulae_tmp[nebulae_tmp['1to1']]['region_ID']]


    overlap = join(
        Table(np.unique(nebulae_hst[~np.isnan(nebulae_hst)],return_counts=True),names=['region_ID','size']),
        Table(np.unique(nebulae_hst[~np.isnan(nebulae_hst) & ~np.isnan(associations_mask.data)],return_counts=True),names=['region_ID','overlap_size']),
        keys=['region_ID'],join_type='outer')
    overlap = overlap.filled(0)
    overlap['overlap_neb'] = overlap['overlap_size']/overlap['size']
    overlap['overlap_neb'].info.format = '%.2f'
    nebulae_tmp = join(nebulae_tmp,overlap[['region_ID','overlap_neb']],keys='region_ID')

    hdu = fits.BinTableHDU(nebulae_tmp,name='joined catalogue')
    hdu.writeto(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_nebulae.fits',overwrite=True)
    #del nebulae_tmp['1to1']

    #print(f'{np.sum(nebulae_tmp["neighbors"]==0)} nebulae have no neighbors')

    catalogue = join(assoc_tmp,nebulae_tmp,keys=['assoc_ID','region_ID'])
    catalogue = join(catalogue,nebulae,keys='region_ID')
    catalogue = join(catalogue,associations,keys='assoc_ID')

    # pay attention to the order of assoc, neb
    catalogue.rename_columns(['X','Y','x','y','RA','DEC','cen_ra','cen_dec',
                              'reg_area','region_area',
                              'EBV_1','EBV_2','EBV_err','EBV_ERR',
                              'SkyCoord_1','SkyCoord_2'],
                             ['x_asc','y_asc','x_neb','y_neb','ra_asc','dec_asc','ra_neb','dec_neb',
                              'area_asc','area_neb',
                              'EBV_balmer','EBV_stars','EBV_balmer_err','EBV_stars_err',
                              'SkyCoord_asc','SkyCoord_neb'])

    # separation to other associations and nebulae
    idx,sep_asc,_= match_coordinates_sky(catalogue['SkyCoord_asc'],associations['SkyCoord'],nthneighbor=2)
    idx,sep_neb,_= match_coordinates_sky(catalogue['SkyCoord_neb'],nebulae['SkyCoord'],nthneighbor=2)
    catalogue['sep_asc'] = sep_asc.to(u.arcsec)
    catalogue['sep_neb'] = sep_neb.to(u.arcsec)

    # select the columns of the joined catalogue
    columns = ['assoc_ID','region_ID','x_asc','y_asc','x_neb','y_neb',
               'ra_asc','dec_asc','ra_neb','dec_neb','SkyCoord_asc','SkyCoord_neb',
               'env_asc','env_neb','area_asc','area_neb',
               'sep_asc','sep_neb','neighbors','Nassoc','overlap','overlap_asc','overlap_neb',
               'age','age_err','mass','mass_err','EBV_stars','EBV_stars_err','EBV_balmer','EBV_balmer_err',
               'met_scal','met_scal_err','logq_D91','logq_D91_err',] + \
                [x for x in nebulae.columns if x.endswith('_FLUX_CORR')] + \
                [x for x in nebulae.columns if x.endswith('_FLUX_CORR_ERR')] + \
                ['NUV_FLUX','NUV_FLUX_ERR','U_FLUX','U_FLUX_ERR','B_FLUX','B_FLUX_ERR',
                 'V_FLUX','V_FLUX_ERR','I_FLUX','I_FLUX_ERR'] 
    catalogue = catalogue[columns]
            
    catalogue.rename_columns([col for col in catalogue.columns if col.endswith('FLUX_CORR')],
                          [col.replace('FLUX_CORR','flux') for col in catalogue.columns if col.endswith('FLUX_CORR')])
    catalogue.rename_columns([col for col in catalogue.columns if col.endswith('FLUX_CORR_ERR')],
                          [col.replace('FLUX_CORR_ERR','flux_err') for col in catalogue.columns if col.endswith('FLUX_CORR_ERR')])
    catalogue['assoc_ID'] = catalogue['assoc_ID'].astype('int')
    catalogue['region_ID'] = catalogue['region_ID'].astype('int')

    catalogue.info.description = 'Joined catalogue between associations and nebulae'
    mean_sep = np.mean(catalogue['SkyCoord_asc'].separation(catalogue['SkyCoord_neb']))
    #print(f'{len(catalogue)} objects in catalogue')
    #print(f'the mean separation between cluster and association center is {mean_sep.to(u.arcsecond):.2f}')


    export = catalogue.copy() #[catalogue['contained']]
    #export.add_column(export['SkyCoord_asc'].to_string(style='hmsdms',precision=2),index=6,name='RaDec_asc')
    #export.add_column(export['SkyCoord_neb'].to_string(style='hmsdms',precision=2),index=8,name='RaDec_neb')

    RA_asc ,DEC_asc = zip(*[x.split(' ') for x in export['SkyCoord_asc'].to_string(style='hmsdms',precision=2)])
    RA_neb ,DEC_neb = zip(*[x.split(' ') for x in export['SkyCoord_neb'].to_string(style='hmsdms',precision=2)])

    export.add_column(RA_asc,index=6,name='Ra_asc')
    export.add_column(DEC_asc,index=8,name='Dec_asc')
    export.add_column(RA_neb,index=10,name='Ra_neb')
    export.add_column(DEC_neb,index=12,name='Dec_neb')

    for col in export.columns:
        if col not in ['Ra_asc','Dec_asc','Ra_neb','Dec_neb','region_ID','cluster_ID','overlap','env_asc','env_neb']:
            export[col].info.format = '%.2f'

    del export[['ra_asc','dec_asc','ra_neb','dec_neb','SkyCoord_neb','SkyCoord_asc']]

    hdu = fits.BinTableHDU(export,name='joined catalogue')
    hdu.writeto(basedir/version/HSTband/f'{scalepc}pc'/f'{name}_{HSTband}_{scalepc}pc_associations_and_nebulae_joined.fits',overwrite=True)
Exemple #16
0
    def calibrate_all(self, use_calon=False):
        """Calibrate the scan in all available ways.

        The basic calibration is `(on - off)/off`, where `on` and `off` are
        on-source and off-source spectra respectively.

        New HDU lists are produced and added to the existing, uncalibrated
        ones.

        If the calibration mark has been used in some scan, an additional
        calibration is applied and further HDU lists are produced.

        Other Parameters
        ----------------
        use_calon : bool, default False
            If False, only the OFF + CAL data are used for the calibration. If
            True, the ON + CAL data are used as well and the calibration
            constant is averaged with the one obtained through OFF + CAL.

        """
        new_tables = {}
        for caltype in ["cal", "onoff"]:
            for (filekey, hdul) in self.tables.items():
                new_filekey = filekey.replace("_all", "_" + caltype)
                new_hdul = copy.deepcopy(hdul)

                table = Table(new_hdul[1].data)

                table.sort(['MJD', 'TELESCOP', 'LINE'])

                out_grouped = table.group_by(['TELESCOP', 'LINE'])
                new_rows = 0
                astropy_table_from_results = None

                apply_cal = caltype == "cal"

                for _, out_group in zip(out_grouped.groups.keys,
                                        out_grouped.groups):
                    out_group = find_cycles(out_group, ['SIGNAL', 'CAL_IS_ON'])

                    grouped = out_group.group_by(['CYCLE'])

                    for _, group in zip(grouped.groups.keys, grouped.groups):
                        # group = vstack([group, group])
                        results, _ = normalize_on_off_cal(group,
                                                          smooth=False,
                                                          apply_cal=apply_cal,
                                                          use_calon=use_calon)
                        if results is None:
                            break
                        if astropy_table_from_results is None:
                            astropy_table_from_results = results
                        else:
                            astropy_table_from_results = \
                                vstack((astropy_table_from_results, results))
                        new_rows += 1
                if astropy_table_from_results is None:
                    continue

                astropy_table_from_results.remove_column('CYCLE')
                astropy_table_from_results.remove_column('SIGNAL')
                astropy_table_from_results.remove_column('CAL_IS_ON')
                astropy_table_from_results.remove_column('BINARY_COL')
                astropy_table_from_results.remove_column('MJD')

                dummy_hdu = \
                    fits.BinTableHDU(data=astropy_table_from_results)
                new_hdul[1].data = dummy_hdu.data

                new_tables[new_filekey] = new_hdul

        self.tables.update(new_tables)
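
The `(on - off)/off` normalization described in the docstring can be illustrated on toy arrays. This is a sketch of the basic idea only; the real work happens inside `normalize_on_off_cal`, whose internals are not shown in this snippet:

import numpy as np

# toy on-source and off-source spectra (arbitrary counts)
on = np.array([110., 120., 250., 130.])
off = np.array([100., 100., 100., 100.])

calibrated = (on - off) / off  # fractional excess over the off spectrum
print(calibrated)  # [0.1 0.2 1.5 0.3]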
Exemple #17
0
    def to_hdulist(self):
        """Convert to `~astropy.io.fits.HDUList`."""
        hdu = fits.BinTableHDU(self.to_table())
        ebounds = energy_axis_to_ebounds(self[0].energy.bins)
        return fits.HDUList([fits.PrimaryHDU(), hdu, ebounds])
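
A short usage note for the snippet above: the returned `HDUList` can be written straight to disk, with the EBOUNDS extension travelling alongside the counts table. A hedged sketch, assuming `spectra` is an instance of the class this method belongs to:

hdulist = spectra.to_hdulist()
hdulist.writeto('spectra.fits', overwrite=True)  # PRIMARY + counts table + EBOUNDS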
Exemple #18
0
def write_image(outfile, image, meta=None):
    """Writes image object to outfile

    Args:
        outfile : output file string
        image : desispec.image.Image object
            (or any object with 2D array attributes image, ivar, mask)

    Optional:
        meta : dict-like object with metadata key/values (e.g. FITS header)
    """

    log = get_logger()
    if meta is not None:
        hdr = fitsheader(meta)
    else:
        hdr = fitsheader(image.meta)

    add_dependencies(hdr)

    #- Work around fitsio>1.0 writing blank keywords, e.g. on 20191212
    for key in hdr.keys():
        if type(hdr[key]) == fits.card.Undefined:
            log.warning('Setting blank keyword {} to None'.format(key))
            hdr[key] = None

    outdir = os.path.dirname(os.path.abspath(outfile))
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    hx = fits.HDUList()
    hdu = fits.ImageHDU(image.pix.astype(np.float32), name='IMAGE', header=hdr)
    if 'CAMERA' not in hdu.header:
        hdu.header.append(
            ('CAMERA', image.camera.lower(), 'Spectrograph Camera'))

    if 'RDNOISE' not in hdu.header and np.isscalar(image.readnoise):
        hdu.header.append(
            ('RDNOISE', image.readnoise, 'Read noise [RMS electrons/pixel]'))

    hx.append(hdu)
    hx.append(fits.ImageHDU(image.ivar.astype(np.float32), name='IVAR'))
    hx.append(fits.CompImageHDU(image.mask.astype(np.int16), name='MASK'))
    if not np.isscalar(image.readnoise):
        hx.append(
            fits.ImageHDU(image.readnoise.astype(np.float32),
                          name='READNOISE'))

    if hasattr(image, 'fibermap'):
        if isinstance(image.fibermap, Table):
            fmhdu = fits.convenience.table_to_hdu(image.fibermap)
            fmhdu.name = 'FIBERMAP'
        else:
            fmhdu = fits.BinTableHDU(image.fibermap, name='FIBERMAP')

        hx.append(fmhdu)

    hx.writeto(outfile + '.tmp', overwrite=True, checksum=True)
    os.rename(outfile + '.tmp', outfile)

    return outfile
Exemple #19
0
    #------header-----------------
    hdr = fits.Header()
    hdr['lambda'] = 'Vacuum Wavelength in Ang'
    hdr['minwav'] = 'Lower complex fitting wavelength range'
    hdr['maxwav'] = 'Upper complex fitting wavelength range'
    hdr['ngauss'] = 'Number of Gaussians for the line'
    hdr['inisig'] = 'Initial guess of linesigma [in lnlambda]'
    hdr['minsig'] = 'Lower range of line sigma [lnlambda]'
    hdr['maxsig'] = 'Upper range of line sigma [lnlambda]'
    hdr['voff  '] = 'Limits on velocity offset from the central wavelength [lnlambda]'
    hdr['vindex'] = 'Entries w/ same NONZERO vindex constrained to have same velocity'
    hdr['windex'] = 'Entries w/ same NONZERO windex constrained to have same width'
    hdr['findex'] = 'Entries w/ same NONZERO findex have constrained flux ratios'
    hdr['fvalue'] = 'Relative scale factor for entries w/ same findex'
    #------save line info-----------
    hdu = fits.BinTableHDU(data=newdata, header=hdr, name='data')
    hdu.writeto(path + 'qsopar.fits', overwrite=True)
    # =============================================================================

    # =============================================================================
    # 4. Begin the fitting process and append the result to DataFrame
    # =============================================================================

    # whether you want to reuse the computed output or not
    read_from_file = False
    z = 6.345

    # here we do the Monte-Carlo simulation to propagate the flux error
    for j in range(1):
        if read_from_file:
            result = pd.read_csv(path + 'result/result_temp.csv')
Exemple #20
0
def make_cubes(indir, outdir, redo=False, bands=None, bscale=1e-19):
    """ Get results from cutouts and join them in a cube. """
    filenames = os.listdir(indir)
    galaxy = os.path.split(indir)[1]
    fields = set([_.split("_")[-4] for _ in filenames])
    sizes = set([_.split("_")[-2] for _ in filenames])
    bands = context.bands if bands is None else bands
    wave = np.array([context.wave_eff[band] for band in bands]) * u.Angstrom
    flam_unit = u.erg / u.cm / u.cm / u.s / u.AA
    fnu_unit = u.erg / u.s / u.cm / u.cm / u.Hz
    ext = {"swp": "DATA", "swpweight": "WEIGHTS"}
    hfields = ["GAIN", "PSFFWHM", "DATE-OBS"]
    for field, size in itertools.product(fields, sizes):
        cubename = os.path.join(outdir, "{}_{}_{}.fits".format(galaxy, field,
                                                               size))
        if os.path.exists(cubename) and not redo:
            continue
        # Loading and checking images
        imgs = [os.path.join(indir, "{}_{}_{}_{}_swp.fits".format(galaxy,
                field,  band, size)) for band in bands]
        if not all([os.path.exists(_) for _ in imgs]):
            continue
        # Checking if images have calibration available
        headers = [fits.getheader(img, ext=1) for img in imgs]
        if not all(["MAGZP" in h for h in headers]):
            continue
        # Checking if weight images are available
        wimgs = [os.path.join(indir, "{}_{}_{}_{}_swpweight.fits".format(
                 galaxy, field,  band, size)) for band in bands]
        has_errs = all([os.path.exists(_) for _ in wimgs])
        # Making new header with WCS
        h = headers[0].copy()
        del h["FILTER"]
        del h["MAGZP"]
        w = WCS(h)
        nw = WCS(naxis=3)
        nw.wcs.cdelt[:2] = w.wcs.cdelt
        nw.wcs.crval[:2] = w.wcs.crval
        nw.wcs.crpix[:2] = w.wcs.crpix
        nw.wcs.ctype[0] = w.wcs.ctype[0]
        nw.wcs.ctype[1] = w.wcs.ctype[1]
        try:
            nw.wcs.pc[:2, :2] = w.wcs.pc
        except Exception:
            pass
        h.update(nw.to_header())
        # Performing the calibration
        m0 = np.array([h["MAGZP"] for h in headers])
        gain = np.array([h["GAIN"] for h in headers])
        f0 = np.power(10, -0.4 * (48.6 + m0))
        data = np.array([fits.getdata(img, 1) for img in imgs])
        fnu = data * f0[:, None, None] * fnu_unit
        flam = fnu * const.c / wave[:, None, None]**2
        flam = flam.to(flam_unit).value / bscale
        if has_errs:
            weights = np.array([fits.getdata(img, 1) for img in wimgs])
            dataerr = 1 / weights + np.clip(data, 0, np.inf) / gain[:, None, None]
            fnuerr = dataerr * f0[:, None, None] * fnu_unit
            flamerr = fnuerr * const.c / wave[:, None, None] ** 2
            flamerr = flamerr.to(flam_unit).value / bscale
        # Making table with metadata
        tab = []
        tab.append(bands)
        tab.append([context.wave_eff[band] for band in bands])
        tab.append([context.exptimes[band] for band in bands])
        names = ["FILTER", "WAVE_EFF", "EXPTIME"]
        for f in hfields:
            if not all([f in h for h in headers]):
                continue
            tab.append([h[f] for h in headers])
            names.append(f)
        tab = Table(tab, names=names)
        # Producing data cubes HDUs.
        hdus = [fits.PrimaryHDU()]
        hdu1 = fits.ImageHDU(flam, h)
        hdu1.header["EXTNAME"] = ("DATA", "Name of the extension")
        hdus.append(hdu1)
        if has_errs:
            hdu2 = fits.ImageHDU(flamerr, h)
            hdu2.header["EXTNAME"] = ("ERRORS", "Name of the extension")
            hdus.append(hdu2)
        for hdu in hdus:
            hdu.header["BSCALE"] = (bscale, "Linear factor in scaling equation")
            hdu.header["BZERO"] = (0, "Zero point in scaling equation")
            hdu.header["BUNIT"] = ("{}".format(flam_unit),
                                   "Physical units of the array values")
        thdu = fits.BinTableHDU(tab)
        hdus.append(thdu)
        thdu.header["EXTNAME"] = "METADATA"
        hdulist = fits.HDUList(hdus)
        hdulist.writeto(cubename, overwrite=True)
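
The calibration step above converts the magnitude zero point to a flux scale with `f0 = 10**(-0.4 * (48.6 + m0))`, i.e. the AB-system zero point of 48.6. A quick numeric check, assuming a zero point of `m0 = 25`:

import numpy as np

m0 = 25.0  # assumed zero point
f0 = 10 ** (-0.4 * (48.6 + m0))
print(f"{f0:.3e}")  # ~3.631e-30 erg/s/cm^2/Hz per data unit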
Exemple #21
0
def writexipbias(samples, rhosfilenames, xim=False, plots=False,
                 nameterms='terms_dxi.png', namedxip='dxi.png',
                 namecovmat='covmat_pars.png', filename='dxip.fits'):
    from readjson import read_rhos
    from maxlikelihood import bestparameters
    from plot_stats import pretty_rho
    from readfits import read_corr
    from astropy.io import fits
    from astropy.table import Table
    import numpy as np

    #plot covariance matrix of parameters alpha, beta and eta.
    if plots:
        par_matcov = np.cov(samples)
        corr=corrmatrix(par_matcov)
        print(par_matcov)
        print(corr)
        cov_vmin=np.min(corr)
        plt.imshow(corr,cmap='viridis'+'_r', interpolation='nearest',
                   aspect='auto', origin='lower', vmin=cov_vmin, vmax=1.)
        plt.colorbar()
        plt.title(r'$\alpha \mid \beta \mid \eta $')
        plt.savefig(namecovmat, dpi=500)
        print(namecovmat, 'Printed!')

    a = b = n = 0; vara =  varb =  varn = 0; covab = covan = covbn = 0
    bestpar = bestparameters(samples)
    par_matcov = np.cov(samples) 
    if (par_matcov.size==1 ): variances = par_matcov
    else: variances = np.diagonal(par_matcov)
    covariances = sum( (par_matcov[i,i+1: ].tolist() for i in range(len(samples) - 1)) , [] )
    if(len(samples)==3):
        a, b, n = bestpar
        vara, varb, varn =  variances
        covab, covan, covbn =  covariances
    elif(len(samples)==2):
        a, b = bestpar
        vara, varb =  variances
        covab =  covariances[0]
    elif(len(samples)==1):
        a =  bestpar[0]
        vara =  variances
    else:
        print("Warning, test type not defined")
    
    
    rhonames = rhosfilenames
    meanr, rho0p, cov_rho0 = read_corr(rhonames[0])
    meanr, rho1p, cov_rho1 = read_corr(rhonames[1])
    meanr, rho2p, cov_rho2 = read_corr(rhonames[2])
    meanr, rho3p, cov_rho3 = read_corr(rhonames[3])
    meanr, rho4p, cov_rho4 = read_corr(rhonames[4])
    meanr, rho5p, cov_rho5 = read_corr(rhonames[5])
    sig_rho0 =  np.sqrt(np.diag(cov_rho0))
    sig_rho1 =  np.sqrt(np.diag(cov_rho1))
    sig_rho2 =  np.sqrt(np.diag(cov_rho2))
    sig_rho3 =  np.sqrt(np.diag(cov_rho3))
    sig_rho4 =  np.sqrt(np.diag(cov_rho4))
    sig_rho5 =  np.sqrt(np.diag(cov_rho5))

    #Ploting each term of the bias
    if(plots):
        xlim = [2., 300.]
        # supposing that a, b and n are independent of the rhos (scale independent)
        var0 = ((2*a*rho0p)**2)*vara +  (a**2)*(sig_rho0**2)
        var1 = ((2*b*rho1p)**2)*varb +  (b**2)*(sig_rho1**2)
        var2 = ((2*n*rho3p)**2)*varn +  (n**2)*(sig_rho3**2)
        varab =  vara*(b**2) + varb*(a**2) + 2*covab*(a*b)
        #varab = ((a*b)**2)*( (vara/((a)**2)) + (varb/((b)**2)) + 2*covab/(a*b) )
        var3 = 4*( (rho2p**2)*varab + (sig_rho2**2)*((a*b)**2)  )
        #var3 = 4*((a*b*rho2p)**2)*( varab/((a*b)**2) + (sig_rho2/rho2p)**2 )
        varbn =  varn*(b**2) + varb*(n**2) + 2*covbn*(b*n)
        #varbn = ((n*b)**2)*( (varn/((n)**2)) + (varb/((b)**2)) + 2*covbn/(b*n) ) 
        var4 = 4*( (rho4p**2)*varbn + (sig_rho4**2)*((n*b)**2)  )
        #var4 = 4*((n*b*rho4p)**2)*(varbn/((b*n)**2) + (sig_rho4/rho4p)**2)
        varan = varn*(a**2) + vara*(n**2) + 2*covan*(a*n)
        #varan = ((n*a)**2)*( (varn/((n)**2)) + (vara/((a)**2)) + 2*covan/(a*n) ) 
        var5 = 4*( (rho5p**2)*varan + (sig_rho5**2)*((n*a)**2)  )
        #var5 = 4*((n*a*rho5p)**2)*(varan/((a*n)**2) + (sig_rho5/rho5p)**2) 
        plt.clf()
        lfontsize = 7
        if (len(samples)==3):
            pretty_rho(meanr, (a**2)*rho0p, np.sqrt(var0), legend=r'$\alpha^{2} \rho_{0}$',lfontsize=lfontsize,  color='red', ylabel='Correlations', xlim=xlim)
            pretty_rho(meanr, (b**2)*rho1p, np.sqrt(var1), legend=r'$\beta^{2}\rho_{1}$',lfontsize=lfontsize,  color='green', ylabel='Correlations', xlim=xlim)
            pretty_rho(meanr, (n**2)*rho3p, np.sqrt(var2), legend=r'$\eta^{2}\rho_{3}$', lfontsize=lfontsize, color='black', ylabel='Correlations', xlim=xlim)
            pretty_rho(meanr, (2*a*b)*rho2p, np.sqrt(var3), legend=r'$2\alpha\beta \rho_{2}$',lfontsize=lfontsize,  color='yellow', ylabel='Correlations', xlim=xlim)
            pretty_rho(meanr, (2*b*n)*rho4p, np.sqrt(var4), legend=r'$2\beta\eta\rho_{4}$',lfontsize=lfontsize,  color='blue', ylabel='Correlations', xlim=xlim)
            pretty_rho(meanr, (2*n*a)*rho5p, np.sqrt(var5), legend=r'$2\eta\alpha\rho_{5}$', lfontsize=lfontsize, color='gray', ylabel='Correlations', xlim=xlim)
            print('Printing',  nameterms)
            plt.savefig(nameterms, dpi=200)
        if (len(samples)==2):
            pretty_rho(meanr, (a**2)*rho0p, np.sqrt(var0), legend=r'$\alpha^{2} \rho_{0}$',lfontsize=lfontsize,  color='red', ylabel='Correlations', xlim=xlim)
            pretty_rho(meanr, (b**2)*rho1p, np.sqrt(var1), legend=r'$\beta^{2}\rho_{1}$',lfontsize=lfontsize,  color='green', ylabel='Correlations', xlim=xlim)
            pretty_rho(meanr, (2*a*b)*rho2p, np.sqrt(var3), legend=r'$2\alpha\beta \rho_{2}$',lfontsize=lfontsize,  color='yellow', ylabel='Correlations', xlim=xlim)
            print('Printing',  nameterms)
            plt.savefig(nameterms, dpi=200)
        if (len(samples)==1):
            pretty_rho(meanr, (a**2)*rho0p, np.sqrt(var0), legend=r'$\alpha^{2} \rho_{0}$',lfontsize=lfontsize,  color='red', ylabel='Correlations', xlim=xlim)
            print('Printing',  nameterms)
            plt.savefig(nameterms, dpi=200)
    
    # supposing that a, b and n are independent of the rhos (scale independent)
    dxip = (a**2)*rho0p + (b**2)*rho1p + (n**2)*rho3p + (2*a*b)*rho2p + (2*b*n)*rho4p + (2*n*a)*rho5p
    f1 = 2*(a*rho0p + b*rho2p + n*rho5p)     
    f2 = 2*(b*rho1p + a*rho2p + n*rho4p)
    f3 = 2*(n*rho3p + b*rho4p + a*rho5p)
    f4 = a**2 ; f5 = b**2; f6 = 2*a*b
    f7 = n**2 ; f8 = 2*b*n; f9 = 2*n*a 
    covmat_dxip = np.diag( (f1**2)*vara + (f2**2)*varb + (f3**2)*varn + 2*(f1*f2*covab + f1*f3*covan + f2*f3*covbn) ) \
    + (f4**2)*(cov_rho0) + (f5**2)*(cov_rho1) + (f6**2)*(cov_rho2) + (f7**2)*(cov_rho3) + (f8**2)*(cov_rho4) + (f9**2)*(cov_rho5)

    if(plots):
        plt.clf()
        pretty_rho(meanr, dxip, np.sqrt(np.diag(covmat_dxip)) , legend=r"$\delta \xi_{+}$",  ylabel=r"$\delta \xi_{+}$",  xlim=xlim)
        print('Printing',  namedxip)
        plt.savefig(namedxip, dpi=150)

    nrows = len(dxip)
    hdu = fits.PrimaryHDU()
    hdul = fits.HDUList([hdu])
    covmathdu = fits.ImageHDU(covmat_dxip, name='COVMAT')
    hdul.insert(1, covmathdu)
    angarray = meanr
    valuearray =  np.array(dxip)
    bin1array = np.array([ -999]*nrows)
    bin2array = np.array([ -999]*nrows)
    angbinarray = np.arange(nrows)
    array_list = [bin1array, bin2array, angbinarray, valuearray, angarray]
    # assumed names for the output table and its extension (left undefined in the snippet)
    names = ['BIN1', 'BIN2', 'ANGBIN', 'VALUE', 'ANG']
    outdata = Table()
    for array, name in zip(array_list, names):
        outdata[name] = array
    nam = 'xim' if xim else 'xip'
    corrhdu = fits.BinTableHDU(outdata, name=nam)
    hdul.insert(2, corrhdu)
    if xim:
        hdul.writeto(filename + 'm.fits', overwrite=True)
    else:
        hdul.writeto(filename + 'p.fits', overwrite=True)
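
The covariance assembled above is plain first-order error propagation: each `f_i` is the partial derivative of `dxip` with respect to one parameter or one rho. A minimal sketch for the one-parameter case (`dxip = a**2 * rho0`), with `samples` standing in for the MCMC chain of alpha; the helper below is illustrative and is not the `bestparameters` routine used above:

import numpy as np

def propagate_dxip_1par(samples, rho0p, cov_rho0):
    """First-order propagation for dxip = a**2 * rho0."""
    a = np.mean(samples)      # illustrative point estimate
    vara = np.var(samples)
    dxip = a**2 * rho0p
    f1 = 2 * a * rho0p        # d(dxip)/da
    f4 = a**2                 # d(dxip)/drho0
    return dxip, np.diag(f1**2 * vara) + f4**2 * cov_rho0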
Exemple #22
0
def filter_evt_file(fits, outfile='./cleaned_file.fits', obsid = ''):
    """
    filter the events in the fits file by dtf and status
    input:  fits    --- fits file
    output: fits    --- filtered fits file
    """
#
#--- get obsid
#
    if obsid == '':
        atemp = re.split('hrcf', fits)
        btemp = re.split('_',    atemp[1])
        obsid = btemp[0]
#
#--- copy the fits file and make sure that it is not zipped
#
    mc = re.search('gz', fits)
    if mc is not None:
        cmd = 'cp ' + fits + ' ztemp1.fits.gz'
        os.system(cmd)
        cmd = 'gzip -d ztemp1.fits.gz'
        os.system(cmd)
    else:
        cmd = 'cp ' + fits + ' ztemp1.fits'
        os.system(cmd)
#
#--- filter out dtf <= 0.98
#
    [pstart, pstop] = get_dead_period(obsid)
    
    t      = pyfits.open('ztemp1.fits')
    tdata  = t[1].data
    t_list = tdata['time']
    t.close()
#
#--- if there are clipped parts based on dtf, create a mask for them
#
    clen = len(pstart)
    if clen > 0:
        m    = 0
        mask = []
        cend = 0
        for t in t_list:
            if cend == 0:
                for k in range(m, clen):
                    if t < pstart[k]:
                        mask.append(True)
                        break
                    elif (t >= pstart[k]) and (t <= pstop[k]):
                        mask.append(False)
                        break
                    elif t > pstop[k]:
                        if k == clen -1:
                            cend = 1
                            break
                        m = k  
                        continue
            else:
                mask.append(True)
        mask.append(False)
    
        mask = numpy.array(mask)
    
        newtdata = tdata[mask]
#
#--- remove the masked part and create a new fits file
#
        hdu = pyfits.BinTableHDU(data=newtdata)
        hdu.writeto('ztemp2.fits')
    
        cmd = 'mv -f ztemp2.fits ztemp1.fits'
        os.system(cmd)
#
#--- filter by status
#
    filter_by_status('ztemp1.fits')

    cmd = 'mv -f  ztemp1.fits ' + outfile
    os.system(cmd)
Exemple #23
0
         1., 1.82574186, 3.16227766, 5.77350269, 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
     ])),
    ('Theta_inc', 0, 18.200001, 5.0, 0.0, 18.200001, 87.099998, 90.0, 3,
     numpy.array([
         0, 60, 90, 0.0, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0.
     ])),
],
                         dtype=dtype)
assert numpy.prod(parameters['NUMBVALS']) == len(table), (
    'parameter definition does not match spectra table',
    parameters['NUMBVALS'], numpy.prod(parameters['NUMBVALS']), len(table))
hdu = pyfits.BinTableHDU(data=parameters)
hdu.header['DATE'] = nowstr
hdu.header['EXTNAME'] = 'PARAMETERS'
hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL'
hdu.header['HDUVERS1'] = '1.0.0'
hdu.header['NINTPARM'] = len(parameters)
hdu.header['NADDPARM'] = 0
hdu.header['HDUCLAS2'] = 'PARAMETERS'
hdus.append(hdu)

# ENERG_LO, ENERG_HI
dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')]
energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype)
hdu = pyfits.BinTableHDU(data=energies)
hdu.header['DATE'] = nowstr
hdu.header['EXTNAME'] = 'ENERGIES'
            "RA_SCZ"), f['SC_DATA'].data.field("DEC_SCZ")

        ang_dis = getAngularDistance(args.ra, args.dec, ra_scz, dec_scz)

        # Find the time where the distance was at its minimum (keeping some margin to later select the data)
        min_idx = ang_dis[:-n_entries].argmin()

        time_of_min_distance = f['SC_DATA'].data.field("START")[min_idx]

        log.info("At MET = %.3f the distance between the pointing and the "
                 "input position was %.3f deg" %
                 (time_of_min_distance, ang_dis[min_idx]))

        # Now create a new FT2 file with the pointing fixed at the desired position

        new_sc_table = pyfits.BinTableHDU(f['SC_DATA'].data[min_idx:min_idx +
                                                            n_entries])

        new_sc_table.data.RA_SCZ[:] = new_sc_table.data.RA_SCZ[0]
        new_sc_table.data.DEC_SCZ[:] = new_sc_table.data.DEC_SCZ[0]
        new_sc_table.data.RA_SCX[:] = new_sc_table.data.RA_SCX[0]
        new_sc_table.data.DEC_SCX[:] = new_sc_table.data.DEC_SCX[0]

        new_sc_table.data.RA_ZENITH[:] = new_sc_table.data.RA_ZENITH[0]
        new_sc_table.data.DEC_ZENITH[:] = new_sc_table.data.DEC_ZENITH[0]

        new_sc_table.data.RA_NPOLE[:] = new_sc_table.data.RA_NPOLE[0]
        new_sc_table.data.DEC_NPOLE[:] = new_sc_table.data.DEC_NPOLE[0]

        new_sc_table.data.ROCK_ANGLE[:] = new_sc_table.data.ROCK_ANGLE[0]

        new_sc_table.data.START = np.arange(args.met_start, stop_time, dt)
Exemple #25
0
    def to_hdulist(self, name=None):
        """Convert to `~astropy.io.fits.HDUList`."""
        return fits.HDUList(
            [fits.PrimaryHDU(), fits.BinTableHDU(self.to_table(), name=name)]
        )
Exemple #26
0
    def _process(self, prod, sci_catalog, astrometry=True):
        if prod.calibed_files is None:
            raise ValueError("Product failed: No calibrated images. raw files:"
                             " {}".format(prod.files.files))

        fm = FileManager(self.image_ext)
        if check_iterable(prod.calibed_files):
            if len(prod.calibed_files) > 1:
                fg = fm.create_filegroup(files=prod.calibed_files,
                                         ext=self.image_ext)
                stacked = None
                for i in fg.hdus():
                    if stacked is None:
                        stacked = i
                    else:
                        s = hdu_shift_images([stacked, i],
                                             method=self._align_method)[1]
                        stacked = imarith(stacked, s, '+')
            elif len(prod.calibed_files) == 1:
                stacked = prod.calibed_files[0]
                fg = fm.create_filegroup(files=prod.calibed_files,
                                         ext=self.image_ext)
                stacked = check_hdu(stacked, ext=self.image_ext)
            else:
                raise ValueError("Product failed: No calibrated images."
                                 "raw files: {}".format(prod.files.files))
                return
        else:
            stacked = prod.calibed_files
            fg = fm.create_filegroup(files=prod.calibed_files,
                                     ext=self.image_ext)
            stacked = check_hdu(stacked, ext=self.image_ext)

        filt = self.get_filter(fg)
        cat = self.select_catalog(filt)

        wcs = WCS(stacked.header, relax=True)
        if '' in wcs.wcs.ctype or astrometry:
            wcs = None

        plate_scale = self.get_platescale(stacked)
        phot, wcs = process_calib_photometry(stacked,
                                             science_catalog=sci_catalog,
                                             identify_catalog=cat,
                                             filter=filt,
                                             wcs=wcs,
                                             return_wcs=True,
                                             plate_scale=plate_scale,
                                             **self.photometry_parameters,
                                             **self.astrometry_parameters)

        apertures = np.unique(phot['aperture'])
        if len(apertures) > 1:
            selected_aperture = 0
            snr = 0
            for g in phot.group_by('aperture').groups:
                g_snr = np.sum(g['flux'] / g['flux_error'])
                if g_snr > snr:
                    selected_aperture = g['aperture'][0]
                    snr = g_snr

            # keep the Table here: the .meta attribute is used just below
            phot = phot[phot['aperture'] == selected_aperture]

        phot.meta['astropop n_images'] = len(prod.calibed_files)
        phot.meta['astropop night'] = prod.files.values('night',
                                                        unique=True)[0]
        phot.meta['astropop filter'] = filt
        stacked.header['astropop n_images'] = len(prod.calibed_files)
        stacked.header['astropop night'] = prod.files.values('night',
                                                             unique=True)[0]
        stacked.header['astropop filter'] = filt

        header = stacked.header
        if wcs is not None:
            header.update(wcs.to_header(relax=True))

        imhdu = fits.PrimaryHDU(stacked.data, header=header)
        tbhdu = fits.BinTableHDU(phot, name='photometry')

        if prod.sci_result is None:
            filename = self.get_filename(fg)
        else:
            filename = prod.sci_result

        mkdir_p(os.path.dirname(filename))
        fits.HDUList([imhdu, tbhdu]).writeto(filename)
Exemple #27
0
    def to_fits(self):
        header = self._prepare_fits_header(self.header)
        table = Table.from_pandas(self.data)
        hdu = fits.BinTableHDU(table, header)
        return hdu
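
`Table.from_pandas` is the bridge that makes a pandas DataFrame digestible for `fits.BinTableHDU`. A self-contained sketch of the same round trip, with made-up column names:

import pandas as pd
from astropy.io import fits
from astropy.table import Table

df = pd.DataFrame({'ra': [10.5, 11.2], 'dec': [-3.1, -2.8]})  # assumed columns
hdu = fits.BinTableHDU(Table.from_pandas(df), name='CATALOG')
hdu.writeto('catalog.fits', overwrite=True)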
Exemple #28
0
def grab_sdss_spectra(radec,
                      radius=0.1 * u.deg,
                      outfil=None,
                      debug=False,
                      maxsep=None,
                      timeout=600.,
                      zmin=None):
    """ Grab SDSS spectra

    Parameters
    ----------
    radec : tuple
      RA, DEC in deg
    radius : float, optional (0.1*u.deg)
      Search radius -- Astroquery actually makes a box, not a circle
    timeout : float, optional
      Timeout limit for connection with SDSS
    outfil : str, optional (None)
      Name of output file for FITS table
    maxsep : float (None) :: Mpc
      Maximum separation to include
    zmin : float (None)
      Minimum redshift to include

    Returns
    -------
    tbl : Table

    """

    cC = coords.SkyCoord(ra=radec[0], dec=radec[1])

    # Query
    photoobj_fs = ['ra', 'dec', 'objid', 'run', 'rerun', 'camcol', 'field']
    mags = [
        'petroMag_u', 'petroMag_g', 'petroMag_r', 'petroMag_i', 'petroMag_z'
    ]
    magsErr = [
        'petroMagErr_u', 'petroMagErr_g', 'petroMagErr_r', 'petroMagErr_i',
        'petroMagErr_z'
    ]

    phot_catalog = SDSS.query_region(cC,
                                     spectro=True,
                                     radius=radius,
                                     timeout=timeout,
                                     photoobj_fields=photoobj_fs + mags +
                                     magsErr)  # Unique
    spec_catalog = SDSS.query_region(cC,
                                     spectro=True,
                                     radius=radius,
                                     timeout=timeout)  # Duplicates exist
    nobj = len(phot_catalog)

    #
    print('grab_sdss_spectra: Found {:d} sources in the search box.'.format(
        nobj))

    # Coordinates
    cgal = SkyCoord(ra=phot_catalog['ra'] * u.degree,
                    dec=phot_catalog['dec'] * u.degree)
    sgal = SkyCoord(ra=spec_catalog['ra'] * u.degree,
                    dec=spec_catalog['dec'] * u.degree)
    sepgal = cgal.separation(cC)  #in degrees

    # Check for problems and parse z
    zobj = np.zeros(nobj)
    idx, d2d, d3d = coords.match_coordinates_sky(cgal, sgal, nthneighbor=1)
    if np.max(d2d) > 1. * u.arcsec:
        print('No spectral match!')
        xdb.set_trace()
    else:
        zobj = spec_catalog['z'][idx]

    idx, d2d, d3d = coords.match_coordinates_sky(cgal, cgal, nthneighbor=2)
    if np.min(d2d.to('arcsec')) < 1. * u.arcsec:
        print('Two photometric sources with same RA/DEC')
        xdb.set_trace()

    #xdb.set_trace()

    # Cut on Separation
    if maxsep is not None:
        print('grab_sdss_spectra: Restricting to {:g} Mpc separation.'.format(
            maxsep))
        sepgal_kpc = cosmo.kpc_comoving_per_arcmin(zobj) * sepgal.to('arcmin')
        sepgal_mpc = sepgal_kpc.to('Mpc')
        gdg = np.where(sepgal_mpc < (maxsep * u.Unit('Mpc')))[0]
        phot_catalog = phot_catalog[gdg]
        #xdb.set_trace()

    nobj = len(phot_catalog)
    print('grab_sdss_spectra: Grabbing data for {:d} sources.'.format(nobj))

    # Grab Spectra from SDSS

    # Generate output table
    attribs = galaxy_attrib()
    npix = 5000  #len( spec_hdus[0][1].data.flux )
    spec_attrib = [(str('FLUX'), np.float32, (npix, )),
                   (str('SIG'), np.float32, (npix, )),
                   (str('WAVE'), np.float64, (npix, ))]
    tbl = np.recarray((nobj, ), dtype=attribs + spec_attrib)

    tbl['RA'] = phot_catalog['ra']
    tbl['DEC'] = phot_catalog['dec']
    tbl['TELESCOPE'] = str('SDSS 2.5-M')

    # Deal with spectra separately (for now)
    npix = 5000  #len( spec_hdus[0][1].data.flux )

    for idx, obj in enumerate(phot_catalog):
        #print('idx = {:d}'.format(idx))

        # Grab spectra (there may be duplicates)
        mt = np.where(
            sgal.separation(cgal[idx]).to('arcsec') < 1. * u.Unit('arcsec'))[0]
        if len(mt) > 1:
            # Use BOSS if you have it
            mmt = np.where(spec_catalog[mt]['instrument'] == 'BOSS')[0]
            if len(mmt) > 0:
                mt = mt[mmt[0]]
            else:
                mt = mt[0]
        elif len(mt) == 0:
            xdb.set_trace()
        else:
            mt = mt[0]

        # Grab spectra
        spec_hdus = SDSS.get_spectra(matches=Table(spec_catalog[mt]))

        tbl[idx]['INSTRUMENT'] = spec_catalog[mt]['instrument']
        spec = spec_hdus[0][1].data
        npp = len(spec.flux)
        tbl[idx]['FLUX'][0:npp] = spec.flux
        sig = np.zeros(npp)
        gdi = np.where(spec.ivar > 0.)[0]
        if len(gdi) > 0:
            sig[gdi] = np.sqrt(1. / spec.ivar[gdi])
        tbl[idx]['SIG'][0:npp] = sig
        tbl[idx]['WAVE'][0:npp] = 10.**spec.loglam

        # Redshifts
        meta = spec_hdus[0][2].data
        for attrib in ['Z', 'Z_ERR']:
            tbl[idx][attrib] = meta[attrib]

        if debug:
            sep_to_qso = cgal[idx].separation(cC).to('arcmin')
            print('z = {:g}, Separation = {:g}'.format(tbl[idx].Z, sep_to_qso))
            xdb.set_trace()

        # Fill in rest
        tbl[idx].SDSS_MAG = np.array([obj[phot] for phot in mags])
        tbl[idx].SDSS_MAGERR = np.array([obj[phot] for phot in magsErr])

    # Clip on redshift to excise stars/quasars
    if zmin is not None:
        gd = np.where(tbl['Z'] > zmin)[0]
        tbl = tbl[gd]

    # Write to FITS file
    if outfil is not None:
        prihdr = fits.Header()
        prihdr['COMMENT'] = 'SDSS Spectra'
        prihdu = fits.PrimaryHDU(header=prihdr)

        tbhdu = fits.BinTableHDU(tbl)

        thdulist = fits.HDUList([prihdu, tbhdu])
        thdulist.writeto(outfil, overwrite=True)

        print('Wrote SDSS table to {:s}'.format(outfil))
    return tbl
Exemple #29
0
def mk_ccf_mask(template, doplot=False):
    # CSV table holding the systemic velocities. An existing entry for the same
    # object is replaced; the table is created if it does not exist.
    systemic_velocity_table = 'systemic_velo.csv'

    print(template)

    # Path where models are saved
    path_to_models = 'HiResFITS'

    # some parameters, don't worry
    dv = 0.00  # km/s -- width of the CCF box
    c = (constants.c / 1000)

    # create directory if needed
    if not os.path.isdir(path_to_models):
        os.system('mkdir {0}'.format(path_to_models))

    # read wavelength and flux. The wavelength is expressed in Ang; we convert to nm
    ftp_link = 'ftp://phoenix.astro.physik.uni-goettingen.de/HiResFITS/'
    wave_file = 'WAVE_PHOENIX-ACES-AGSS-COND-2011.fits'
    if not os.path.isfile(path_to_models + '/' + wave_file):
        os.system('wget {0}{1}'.format(ftp_link, wave_file))
        os.system('mv {0} {1}'.format(wave_file, path_to_models))
    wave_phoenix = fits.getdata(path_to_models + '/' + wave_file) / 10

    if 'FP' not in template:
        # get the Goettingen models if you don't have them.
        for temperature in np.arange(3000, 6100, 100):
            temperature = str(int(np.round(temperature, -2)))
            outname = '{0}/lte0{1}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'.format(
                path_to_models, temperature)

            if not os.path.isfile(outname):
                os.system(
                    'wget {0}PHOENIX-ACES-AGSS-COND-2011/Z-0.0/lte0{1}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'
                    .format(ftp_link, temperature))

                os.system(
                    'mv lte0{1}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits {0}'
                    .format(
                        path_to_models,
                        temperature,
                    ))

    # read template and header
    tbl, hdr = fits.getdata(template, ext=1, header=True)

    if 'FP' not in template:
        hdr2 = fits.getheader(template, ext=2)
        nsp_input = hdr2['NAXIS2']
    else:
        nsp_input = 0
        hdr['OBJECT'] = 'FP'

    out_pos_name = hdr['OBJECT'].upper() + '_pos.fits'
    if os.path.isfile(out_pos_name):
        print('File {} exists, we skip'.format(out_pos_name))
        return

    if 'FP' not in template:
        # round temperature in header to nearest 100 and get the right model
        if 'OBJTEMP' in hdr:
            temperature = hdr['OBJTEMP']
            if temperature < 3000:
                temperature = 3000
            if temperature > 6000:
                temperature = 6000

            temperature = str(int(np.round(temperature, -2)))
        else:
            temperature = '3600'

        print('Temperature = ', temperature)
        outname = '{0}/lte0{1}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'.format(
            path_to_models, temperature)
        print('Model file = ', outname)
        flux_phoenix = fits.getdata(outname)

    # get wave and flux vectors
    w = np.array(tbl['wavelength'])
    f = np.array(tbl['flux'])

    f2 = np.array(f)
    mask = np.isfinite(f)
    f2[~mask] = 0
    mask = mask * 1.0
    f = np.convolve(f2, np.ones(5), mode='same') / np.convolve(
        mask, np.ones(5), mode='same')

    # find the first and second derivative of the flux
    df = np.gradient(f)
    ddf = np.gradient(np.gradient(f))

    # lines are regions where there is a sign change in the derivative of the flux
    # we also have some checks for NaNs
    line = np.where((np.sign(df[1:]) != np.sign(df[:-1]))
                    & np.isfinite(ddf[1:])
                    & np.isfinite(df[1:])
                    & np.isfinite(df[:-1]))[0]

    # create the output table
    tbl = dict()
    tbl['ll_mask_s'] = np.zeros_like(line, dtype=float)
    tbl['ll_mask_e'] = np.zeros_like(line, dtype=float)
    # the weight is the second derivative of the flux. The sharper the line,
    # the more weight we give it
    tbl['w_mask'] = ddf[line]
    tbl['value'] = f[line]

    tbl['depth'] = np.zeros_like(tbl['value'])

    tbl['depth'][1:-1] = 1 - tbl['value'][1:-1] / (
        (tbl['value'][0:-2] + tbl['value'][2:]) / 2)

    for i in tqdm(range(len(line))):
        # we perform a linear interpolation to find the exact wavelength
        # where the derivative goes to zero
        wave_cen = (np.polyfit(df[line[i]:line[i] + 2], w[line[i]:line[i] + 2],
                               1))[1]

        # we offset that wavelength by the systemic velocity and subtract
        # half of the line width
        corrv = np.sqrt((1 + (-dv / 2) / c) / (1 - (-dv / 2) / c))
        tbl['ll_mask_s'][i] = wave_cen * corrv

        # same but for the upper bound to the line position
        corrv = np.sqrt((1 + (dv / 2) / c) / (1 - (dv / 2) / c))
        tbl['ll_mask_e'][i] = wave_cen * corrv

    weight = tbl['w_mask']

    systemic_velocity = 0

    if 'FP' not in template:
        # create a spline of the model
        model = InterpolatedUnivariateSpline(wave_phoenix, flux_phoenix)

        # assume a 0 velocity and search
        dv0 = 0
        scale = 1.0

        tbl0 = Table(tbl)

        low_contrast = False

        for ite in range(3):
            corrv = np.sqrt(
                (1 + systemic_velocity / c) / (1 - systemic_velocity / c))
            tbl['ll_mask_s'] = tbl0['ll_mask_s'] / corrv
            tbl['ll_mask_e'] = tbl0['ll_mask_e'] / corrv

            wavelines = (tbl['ll_mask_s'] + tbl['ll_mask_e']) / 2.0

            dvs = np.arange(400, dtype=float)
            dvs -= np.mean(dvs)

            dvs *= scale
            #dvs += systemic_velocity

            neg_mask = weight > 0

            weight_tmp = weight[neg_mask]
            wave_tmp = wavelines[neg_mask]

            cc = np.zeros_like(dvs)
            for i in range(len(dvs)):
                corrv = np.sqrt((1 + dvs[i] / c) / (1 - dvs[i] / c))
                cc[i] = np.sum(weight_tmp * model(wave_tmp / corrv))

            # just centering the cc around one and removing low-f trends
            #cc = (cc / medfilt(cc, 21))

            minpos = np.argmin(cc)
            fit = np.polyfit(dvs[minpos - 1:minpos + 2],
                             cc[minpos - 1:minpos + 2], 2)

            if doplot:

                plt.plot(dvs + systemic_velocity, cc, alpha=0.5)

            systemic_velocity += (-.5 * fit[1] / fit[0])
            print(systemic_velocity)
            scale /= 5.0

            if np.min(cc) / np.max(cc) > 0.95:
                low_contrast = True
                print('not enough ccf contrast, will end after the plot')

        if doplot:
            plt.title("CCF of model SP with target's line list\n"
                      'This gets you the systemic velocity')
            plt.xlabel('Velocity')
            plt.ylabel('Arbitrary flux')
            plt.show()
        if low_contrast:
            return

        hdr['SYSVELO'] = systemic_velocity, 'meas. systemic velocity (km/s)'
        hdr['VELOFILE'] = outname, 'model used for SYSVEL cc'

        print(
            '\n\tsystemic velocity : {0:.2f}km/s\n'.format(systemic_velocity))

        if not os.path.isfile(systemic_velocity_table):
            tbl_sysvelo = Table()
            tbl_sysvelo['OBJECT'] = [hdr['OBJECT']]
            tbl_sysvelo['SYSTEMIC_VELOCITY'] = [systemic_velocity]
            tbl_sysvelo['MODEL_FILE'] = [hdr['VELOFILE']]

            print('We create {0}'.format(systemic_velocity_table))
            tbl_sysvelo.write(systemic_velocity_table)

        else:
            tbl_old = Table.read(systemic_velocity_table)
            tbl_sysvelo = Table()

            tbl_sysvelo['OBJECT'] = np.append(hdr['OBJECT'], tbl_old['OBJECT'])
            tbl_sysvelo['SYSTEMIC_VELOCITY'] = np.append(
                systemic_velocity, tbl_old['SYSTEMIC_VELOCITY'])
            tbl_sysvelo['MODEL_FILE'] = np.append(hdr['VELOFILE'],
                                                  tbl_old['MODEL_FILE'])

            print('We append {0}'.format(systemic_velocity_table))
            tbl_sysvelo.write(systemic_velocity_table, overwrite=True)

    # convert back to table for manipulation
    tbl = et.td_convert(tbl)

    if 'FP' not in template:
        valid = np.isfinite(f)
        spline = InterpolatedUnivariateSpline(w[valid], f[valid], k=1, ext=0)
        # DETERMINATION OF H-band FWHM
        # cen, ew, amp, zp, slope
        dvs = np.arange(-50000, 50000, 500) + systemic_velocity * 1000
        cc = np.zeros_like(dvs, dtype=float)

        # select H-band lines (1500-1800 nm) with positive weight
        H = (tbl['ll_mask_s'] > 1500) & (tbl['ll_mask_s'] < 1800) \
            & (tbl['w_mask'] > 0)
        wave_H = np.array(tbl['ll_mask_s'][H])
        weights_H = np.array(tbl['w_mask'][H])
        for i in range(len(dvs)):
            cc[i] = np.sum(weights_H * spline(et.doppler(wave_H, -dvs[i])))

        imin = np.nanargmin(cc)
        p0 = [
            dvs[imin], 4000,
            np.nanmin(cc) - np.nanmedian(cc),
            np.nanmedian(cc), 0
        ]

        fit_gau = et.fit_gauss(dvs, cc, p0)
        gfit = et.gauss(dvs, *fit_gau)

        cc /= np.polyval(fit_gau[[4, 3]], dvs)
        gfit /= np.polyval(fit_gau[[4, 3]], dvs)

        print(fit_gau)

        if doplot:
            plt.plot(dvs / 1000,
                     cc,
                     color='black',
                     alpha=0.5,
                     label='normalized CCF')
            plt.plot(dvs / 1000,
                     gfit,
                     alpha=0.5,
                     label='normalized gaussian fit')
            plt.ylabel('flux')
            plt.xlabel('velocity [km/s]')
            plt.legend()
            plt.show()

        hdr['CCF_FWHM'] = np.sqrt(
            2 * np.log(2)) * 2 * fit_gau[1] / 1000, 'H-band CCF FWHM in km/s'
        hdr['CCF_CONT'] = 1 - np.min(cc), 'Fractional CCF contrast'

    if doplot:

        plt.plot(w, f, 'g-', label='input spectrum')
        plt.vlines(tbl[tbl['w_mask'] < 0]['ll_mask_s'],
                   np.nanmin(f),
                   np.nanmax(f),
                   'k',
                   alpha=0.2,
                   label='positive feature')
        plt.vlines(tbl[tbl['w_mask'] > 0]['ll_mask_s'],
                   np.nanmin(f),
                   np.nanmax(f),
                   'r',
                   alpha=0.2,
                   label='negative feature')
        plt.legend()
        plt.xlabel('Wavelength [nm]')
        plt.ylabel('Arbitrary flux')
        plt.show()

    # write the output table
    fits.writeto(hdr['OBJECT'] + '.fits', tbl, hdr, overwrite=True)

    pos_mask = tbl['w_mask'] < 0
    neg_mask = tbl['w_mask'] > 0

    tbl['w_mask'] /= np.nanmean(np.abs(tbl['w_mask']))

    tbl[pos_mask].write(hdr['OBJECT'] + '_pos.csv',
                        format='ascii',
                        overwrite=True)
    tbl[neg_mask].write(hdr['OBJECT'] + '_neg.csv',
                        format='ascii',
                        overwrite=True)
    tbl.write(hdr['OBJECT'] + '_full.mas', format='ascii', overwrite=True)

    tbl2 = tbl[tbl['w_mask'] > 0]
    tbl2['w_mask'] /= np.nanmedian(tbl2['w_mask'])
    tbl2['depth'] /= np.nanmedian(tbl2['depth'])
    tbl2['depth'] = np.abs(tbl2['depth'])

    f = open(hdr['OBJECT'] + '_neg.mas', 'w')
    for i in range(len(tbl2)):
        f.write('      ' + '      '.join([
            str(tbl2['ll_mask_s'][i])[0:14],
            str(tbl2['ll_mask_e'][i])[0:14],
            str(tbl2['w_mask'][i])[0:12]
        ]) + '\n')
    f.close()

    f = open(hdr['OBJECT'] + '_neg_depth.mas', 'w')
    for i in range(len(tbl2)):
        f.write('      ' + '      '.join([
            str(tbl2['ll_mask_s'][i])[0:14],
            str(tbl2['ll_mask_e'][i])[0:14],
            str(tbl2['depth'][i])[0:12]
        ]) + '\n')
    f.close()

    tbl2 = tbl[tbl['w_mask'] < 0]
    tbl2['w_mask'] /= np.nanmedian(tbl2['w_mask'])

    hdu1 = fits.PrimaryHDU()
    hdu1.header['SYSTVEL'] = systemic_velocity, 'Systemic velocity'
    hdu1.header['NSPTEMPL'] = nsp_input, 'Number of spectra used for template'

    keys_transfer = ['OBJTEMP', 'PI_NAME', 'CCF_FWHM', 'CCF_CONT']
    for key in keys_transfer:
        if key in hdr.keys():
            hdu1.header[key] = hdr[key]

    hdu2 = fits.BinTableHDU(tbl2)
    # convert back from dictionary to table and save
    new_hdul = fits.HDUList([hdu1, hdu2])
    new_hdul.writeto(out_pos_name, overwrite=True)
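
The factor `corrv = sqrt((1 + v/c)/(1 - v/c))` used throughout the mask construction above is the relativistic Doppler factor. A small helper making the shift explicit, assuming velocities in km/s as in the snippet:

import numpy as np

C_KMS = 299792.458  # speed of light in km/s

def doppler_shift(wavelength, velocity_kms):
    """Shift a wavelength by the relativistic Doppler factor."""
    corrv = np.sqrt((1 + velocity_kms / C_KMS) / (1 - velocity_kms / C_KMS))
    return wavelength * corrv

# e.g. doppler_shift(1600.0, 30.0) shifts a 1600 nm line by ~+0.16 nm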
Exemple #30
0
                        meta=hdul['AGE_LW'].header,
                        wcs=WCS(hdul['AGE_LW'].header))

    tmp = nebulae[nebulae['gal_name'] == gal_name]
    positions = np.transpose((tmp['cen_x'], tmp['cen_y']))
    apertures = CircularAperture(positions, 2)
    ages_mw = aperture_photometry(age_mw,
                                  apertures)['aperture_sum'] / apertures.area
    ages_lw = aperture_photometry(age_lw,
                                  apertures)['aperture_sum'] / apertures.area

    nebulae['age_mw'][nebulae['gal_name'] == gal_name] = ages_mw
    nebulae['age_lw'][nebulae['gal_name'] == gal_name] = ages_lw

# write to file
columns = ['gal_name', 'region_ID', 'age_mw', 'age_lw']

doc = f'''this catalogue contains the ages from the stellar populations
fit measured at the position of the nebulae.
last update: {date.today().strftime("%b %d, %Y")}
'''

primary_hdu = fits.PrimaryHDU()
for i, comment in enumerate(doc.split('\n')):
    if i == 0:
        primary_hdu.header['COMMENT'] = comment
    else:
        primary_hdu.header[''] = comment
table_hdu = fits.BinTableHDU(nebulae[columns])
hdul = fits.HDUList([primary_hdu, table_hdu])
hdul.writeto('Nebulae_Catalogue_v2p1_age.fits', overwrite=True)