Example #1
def plot_LSST_wcs(filename):
    hdulist = fits.open(filename)
    wcs = WCS(hdulist[1].header)

    hpx_header = {'NAXIS': 2,
                  'CTYPE1': 'RA---HPX',
                  'CTYPE2': 'DEC--HPX',
              }
    hpx = WCS(hpx_header)
    
    origin = 0
    xpix, ypix = np.meshgrid(*[np.arange(0, hdulist[1].header[key], 200)
                               for key in ['NAXIS1', 'NAXIS2']])
    Xpix = np.vstack(list(map(np.ravel, (xpix, ypix)))).T  # list() so numpy gets a sequence under Python 3

    for func in [wcs.wcs_pix2world, wcs.all_pix2world]:
        Xworld = func(Xpix, origin)
        Xhpx = hpx.wcs_world2pix(Xworld, origin)

        x, y = Xhpx.T

        plt.plot(x, y, '.')
        plt.gca().set_aspect('equal')
        plt.xlim(0, 360)
        plt.ylim(-90, 90)
    plt.show()
Example #2
def get_facet_values(facet, ra, dec, root="facet", default=0):
    """
    Extract the value from a fits facet file
    """
    import numpy as np
    from astropy.io import fits
    from astropy.wcs import WCS

    # TODO: Check astropy version
    # TODO: Check facet is a fits file

    with fits.open(facet) as f:
        shape = f[0].data.shape

        w = WCS(f[0].header)
        freq = w.wcs.crval[2]
        stokes = w.wcs.crval[3]

        xe, ye, _1, _2 = w.all_world2pix(ra, dec, freq, stokes, 1)
        x, y = np.round(xe).astype(int), np.round(ye).astype(int)

        # Dummy value for points out of the fits area
        x[(x < 0) | (x >= shape[-1])] = -1
        y[(y < 0) | (y >= shape[-2])] = -1

        data = f[0].data[0,0,:,:]

        values = data[y, x]

        # Assign the default value to NaNs and points out of the fits area
        values[(x == -1) | (y == -1)] = default
        values[np.isnan(values)] = default

        #TODO: Flexible format for other data types ?
        return np.array(["{}_{:.0f}".format(root, val) for val in values])
Example #3
    def setup_class(self):

        np.random.seed(12345)

        w = WCS(naxis=2)

        lon = np.linspace(10., 11., 5)
        lat = np.linspace(20., 21., 5)

        self.tmpdir = tempfile.mkdtemp()
        os.mkdir(os.path.join(self.tmpdir, 'raw'))

        for i in range(len(lon)):
            for j in range(len(lat)):

                w.wcs.crpix = [50.5, 50.5]
                w.wcs.cdelt = np.array([-0.0066667, 0.0066667])
                w.wcs.crval = [lon[i], lat[j]]
                w.wcs.ctype = [b"RA---TAN", b"DEC--TAN"]
                w.wcs.crota = [0, np.random.uniform(0., 360.)]

                header = w.to_header()

                hdu = fits.PrimaryHDU(header=header)
                hdu.data = np.random.random((100,100))
                hdu.writeto(os.path.join(self.tmpdir, 'raw', 'test_{0:02d}_{1:02d}.fits'.format(i, j)), overwrite=True)
Example #4
def construct_wcs_from_scratch():
    print "Constructing WCS from a dictionary header:"
    header = {'NAXIS': 2,
              'NAXIS1': 2048,
              'CTYPE1': 'RA---TAN',
              'CRVAL1': 22.828128476,
              'CRPIX1': 1025.0,
              'CDELT1': 1.0,
              'NAXIS2': 1489,
              'CTYPE2': 'DEC--TAN',
              'CRVAL2': -0.945969070278,
              'CRPIX2': 745.0,
              'CDELT2': 1.0,
              'CD1_1': -1.06502217270E-08,
              'CD1_2': 0.000109979647614,
              'CD2_1': 0.000109949614203,
              'CD2_2': 3.21789868737E-09}
    wcs = WCS(header)
    
    # Test a round-trip from pix to world and back
    origin = 0
    pix_coords = np.arange(1, 11).reshape((5, 2))
    world_coords = wcs.wcs_pix2world(pix_coords, origin)
    pix_coords2 = wcs.wcs_world2pix(world_coords, origin)
    print "  - Round trip matches:", np.allclose(pix_coords, pix_coords2)

    return wcs
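For comparison, newer astropy releases also expose a high-level interface on the same object; a short sketch using the function above:

wcs = construct_wcs_from_scratch()
sky = wcs.pixel_to_world(100, 200)   # returns a SkyCoord for celestial axes
print(sky.ra.deg, sky.dec.deg)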
Example #5
def wcs_add_energy_axis(wcs, energies):
    """Copy a WCS object, and add on the energy axis.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        WCS
    energies : array-like
       Array of energies.
    """
    if wcs.naxis != 2:
        raise ValueError(
            'wcs_add_energy_axis: input WCS must have naxis == 2, got %i' % wcs.naxis)
    w = WCS(naxis=3)
    w.wcs.crpix[0] = wcs.wcs.crpix[0]
    w.wcs.crpix[1] = wcs.wcs.crpix[1]
    w.wcs.ctype[0] = wcs.wcs.ctype[0]
    w.wcs.ctype[1] = wcs.wcs.ctype[1]
    w.wcs.crval[0] = wcs.wcs.crval[0]
    w.wcs.crval[1] = wcs.wcs.crval[1]
    w.wcs.cdelt[0] = wcs.wcs.cdelt[0]
    w.wcs.cdelt[1] = wcs.wcs.cdelt[1]
    w = WCS(w.to_header())
    w.wcs.crpix[2] = 1
    w.wcs.crval[2] = energies[0]
    w.wcs.cdelt[2] = energies[1] - energies[0]
    w.wcs.ctype[2] = 'Energy'
    return w
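A minimal usage sketch for the helper above; the 2-D WCS and the energy grid are invented for illustration:

import numpy as np
from astropy.wcs import WCS

w2 = WCS(naxis=2)
w2.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w2.wcs.crval = [83.6, 22.0]
w2.wcs.crpix = [50.5, 50.5]
w2.wcs.cdelt = [-0.01, 0.01]

energies = np.logspace(2, 5, 4)          # hypothetical energy grid
w3 = wcs_add_energy_axis(w2, energies)
print(w3.wcs.ctype[2], w3.wcs.crval[2])  # Energy 100.0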
Example #6
def test_wcs_attribute(ccd_data, tmpdir):
    tmpfile = tmpdir.join('temp.fits')
    # This wcs example is taken from the astropy.wcs docs.
    wcs = WCS(naxis=2)
    wcs.wcs.crpix = np.array(ccd_data.shape)/2
    wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
    wcs.wcs.crval = [0, -90]
    wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
    wcs.wcs.set_pv([(2, 1, 45.0)])
    ccd_data.header = ccd_data.to_hdu()[0].header

    ccd_data.header.extend(wcs.to_header())
    ccd_data.write(tmpfile.strpath)
    ccd_new = CCDData.read(tmpfile.strpath)
    original_header_length = len(ccd_new.header)
    # WCS attribute should be set for ccd_new
    assert ccd_new.wcs is not None
    # WCS attribute should be equal to wcs above.
    assert ccd_new.wcs.wcs == wcs.wcs

    # Converting CCDData object with wcs to an hdu shouldn't
    # create duplicate wcs-related entries in the header.
    ccd_new_hdu = ccd_new.to_hdu()[0]
    assert len(ccd_new_hdu.header) == original_header_length

    # Making a CCDData with WCS (but not WCS in the header) should lead to
    # WCS information in the header when it is converted to an HDU.
    ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
    hdu = ccd_wcs_not_in_header.to_hdu()[0]
    wcs_header = wcs.to_header()
    for k in wcs_header.keys():
        # No keyword from the WCS should be in the header.
        assert k not in ccd_wcs_not_in_header.header
        # Every keyword in the WCS should be in the header of the HDU
        assert hdu.header[k] == wcs_header[k]
Example #7
def create_wcs(skydir, coordsys='CEL', projection='AIT',
               cdelt=1.0, crpix=1., naxis=2, energies=None):
    """Create a WCS object.

    Parameters
    ----------
    skydir : `~astropy.coordinates.SkyCoord`
        Sky coordinate of the WCS reference point.
    coordsys : {'CEL', 'GAL'}
        Coordinate system of the projection.
    projection : str
        Projection FITS code, e.g. 'AIT' or 'TAN'.
    cdelt : float or (float,float)
        In the first case the same value is used for x and y axes
    crpix : float or (float,float)
        In the first case the same value is used for x and y axes
    naxis : {2, 3}
       Number of dimensions of the projection.
    energies : array-like
       Array of energies that defines the third dimension if naxis=3.
    """

    w = WCS(naxis=naxis)

    if coordsys == 'CEL':
        w.wcs.ctype[0] = 'RA---%s' % (projection)
        w.wcs.ctype[1] = 'DEC--%s' % (projection)
        w.wcs.crval[0] = skydir.icrs.ra.deg
        w.wcs.crval[1] = skydir.icrs.dec.deg
    elif coordsys == 'GAL':
        w.wcs.ctype[0] = 'GLON-%s' % (projection)
        w.wcs.ctype[1] = 'GLAT-%s' % (projection)
        w.wcs.crval[0] = skydir.galactic.l.deg
        w.wcs.crval[1] = skydir.galactic.b.deg
    else:
        raise ValueError('Unrecognized coordinate system: %s' % coordsys)

    try:
        w.wcs.crpix[0] = crpix[0]
        w.wcs.crpix[1] = crpix[1]
    except TypeError:  # scalar crpix: use the same value for both axes
        w.wcs.crpix[0] = crpix
        w.wcs.crpix[1] = crpix

    try:
        w.wcs.cdelt[0] = cdelt[0]
        w.wcs.cdelt[1] = cdelt[1]
    except TypeError:  # scalar cdelt: negative step on the x axis by convention
        w.wcs.cdelt[0] = -cdelt
        w.wcs.cdelt[1] = cdelt

    w = WCS(w.to_header())
    if naxis == 3 and energies is not None:
        w.wcs.crpix[2] = 1
        w.wcs.crval[2] = energies[0]
        w.wcs.cdelt[2] = energies[1] - energies[0]
        w.wcs.ctype[2] = 'Energy'
        w.wcs.cunit[2] = 'MeV'

    return w
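A hedged usage sketch for create_wcs; the sky position and grid parameters below are made up:

from astropy.coordinates import SkyCoord

skydir = SkyCoord(83.63, 22.01, unit='deg', frame='icrs')
w = create_wcs(skydir, coordsys='GAL', projection='TAN',
               cdelt=0.1, crpix=(50.5, 50.5))
print(w.wcs.ctype)   # GLON-TAN / GLAT-TAN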
Example #8
def hextile(image,radius):

    pos=[]
    hs=radius*np.sqrt(3)
    hdus=fits.open(image)
    hdu=flatten(hdus)
    maxy,maxx=hdu.data.shape
    w=WCS(hdu.header)
    print('Hex tiling image')
    # coordinates of the image centre
    ra_c,dec_c=w.wcs_pix2world(maxx/2,maxy/2,0)
    ra_factor=np.cos(dec_c*np.pi/180.0)
    # coordinates of the image corners
    ra_ll,dec_ll=w.wcs_pix2world(0,0,0)
    ra_lr,dec_lr=w.wcs_pix2world(maxx,0,0)
    ra_ul,dec_ul=w.wcs_pix2world(0,maxy,0)
    c_c=SkyCoord(ra_c*u.degree,dec_c*u.degree,frame='icrs')
    c_ll=SkyCoord(ra_ll*u.degree,dec_ll*u.degree,frame='icrs')
    c_lr=SkyCoord(ra_lr*u.degree,dec_lr*u.degree,frame='icrs')
    dra,ddec=[v.value for v in c_c.spherical_offsets_to(c_ll)]
    nha=dra*2/hs
    print('Number of hexes across',nha)
    c_ul=SkyCoord(ra_ul*u.degree,dec_ul*u.degree,frame='icrs')
    dra,ddec=[v.value for v in c_c.spherical_offsets_to(c_ul)]
    nhu=2*ddec/hs
    print('Number of hexes up',nhu)
    nha=int(0.5+nha)
    nhu=int(0.5+nhu)
    for j in range(nhu):
        for i in range(nha):
            xc=(1.0*maxx*(i+(j % 2)*0.5))/nha
            yc=(maxy*(j+0.5))/nhu
            ra_p,dec_p=w.wcs_pix2world(xc,yc,0)
            pos.append((float(ra_p),float(dec_p)))
    return ra_factor,pos
Example #9
def contains(image, x, y, world=True):
    """Check if given pixel or world positions are in an image.

    Parameters
    ----------
    image : `~astropy.io.fits.ImageHDU`
        2-dim FITS image
    x : float
        x coordinate in the image
    y : float
        y coordinate in the image
    world : bool, optional
        Are x and y in world coordinates (or pixel coordinates)?

    Returns
    -------
    containment : array
        Bool array
    """
    header = image.header

    if world:
        wcs = WCS(header)
        origin = 0  # convention for gammapy
        x, y = wcs.wcs_world2pix(x, y, origin)

    # NAXIS1 is the length of the x axis, NAXIS2 of the y axis
    nx, ny = header['NAXIS1'], header['NAXIS2']
    # with origin=0 the image spans [-0.5, n - 0.5] along each axis
    return (x >= -0.5) & (x <= nx - 0.5) & (y >= -0.5) & (y <= ny - 0.5)
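A quick self-contained check of contains; the synthetic image and pointing below are illustrative only:

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [30.0, 45.0]
w.wcs.crpix = [50.5, 50.5]
w.wcs.cdelt = [-0.01, 0.01]
image = fits.ImageHDU(data=np.zeros((100, 100)), header=w.to_header())

print(contains(image, 30.0, 45.0))   # reference point -> True
print(contains(image, 32.0, 45.0))   # ~1.4 deg off a 1 deg field -> False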
Example #10
def makecuts(image,imagefilter):
    catdat = fits.getdata(args.catalog)
    print('Cutting out', image)
    
    zFlag = (catdat.Z > Zmin) & (catdat.Z < Zmax)
    
    f = fits.open(image)
    prihdr = f[0].header
    n2,n1 = f[0].data.shape
    
    w= WCS(image)
    px,py = w.wcs_world2pix(catdat.RA,catdat.DEC,1)
    onimageflag=(px < n1) & (px >0) & (py < n2) & (py > 0)
    
    keepflag=zFlag & onimageflag
    RA=catdat.RA[keepflag]
    DEC=catdat.DEC[keepflag]
    radius=catdat.SERSIC_TH50[keepflag]
    IDNUMBER=catdat.NSAID[keepflag]
    print('number of galaxies to keep = ', sum(keepflag))

#    if args.region_file:
        
    for i in range(len(RA)):

        if (radius[i]<.01):
            size=120.
        else:
            size=float(args.scale)*radius[i]
            
        position = SkyCoord(ra=RA[i],dec=DEC[i],unit='deg')
        size = u.Quantity((size, size), u.arcsec)
        #print(image, radius[i], position, size)
        #cutout = Cutout2D(f[0].data, position, size, wcs=w, mode='strict') # 'strict' requires the cutout to lie fully on the parent image
        try:
            cutout = Cutout2D(f[0].data, position, size, wcs=w, mode='trim') # 'trim' keeps the overlapping region
        except astropy.nddata.utils.PartialOverlapError:
            print('galaxy is only partially covered by mosaic - skipping ', IDNUMBER[i])
            continue
        except astropy.nddata.utils.NoOverlapError:
            print('galaxy is not covered by mosaic - skipping ', IDNUMBER[i])
            continue
        if args.plot:
            plt.figure()
            plt.imshow(f[0].data, origin='lower',cmap='gray', norm=LogNorm())
            cutout.plot_on_original(color='white')
            plt.show()
            r = input('type any key to continue (p to skip plotting) \n')
            if r.find('p') > -1:
                args.plot = False
        # figure out how to save the cutout as fits image
        ((ymin,ymax),(xmin,xmax)) = cutout.bbox_original
        outimage = args.prefix+'-'+(str(IDNUMBER[i])+'-'+ args.filter+".fits")
        newfile = fits.PrimaryHDU()
        newfile.data = f[0].data[ymin:ymax,xmin:xmax]
        newfile.header = f[0].header
        newfile.header.update(w[ymin:ymax,xmin:xmax].to_header())
        
        fits.writeto(outimage, newfile.data, header=newfile.header, overwrite=True)
    return cutout
Example #11
def test_wcs_keyword_removal_for_wcs_test_files():
    """
    Test, for the WCS test files, that keyword removall works as
    expected. Those cover a much broader range of WCS types than
    test_wcs_keywords_removed_from_header
    """
    from astropy.nddata.ccddata import _generate_wcs_and_update_header
    from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER

    keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
    wcs_headers = get_pkg_data_filenames('../../wcs/tests/data',
                                         pattern='*.hdr')

    for hdr in wcs_headers:
        # Skip the files that are expected to be bad...
        if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr:
            continue
        header_string = get_pkg_data_contents(hdr)
        wcs = WCS(header_string)
        header = wcs.to_header(relax=True)
        new_header, new_wcs = _generate_wcs_and_update_header(header)
        # Make sure all of the WCS-related keywords have been removed.
        assert not (set(new_header) &
                    set(new_wcs.to_header(relax=True)) -
                    keepers)
        # Check that the new wcs is the same as the old.
        new_wcs_header = new_wcs.to_header(relax=True)
        for k, v in new_wcs_header.items():
            if isinstance(v, str):
                assert header[k] == v
            else:
                np.testing.assert_almost_equal(header[k], v)
Example #12
def crossmatchtwofiles(img1, img2, radius=3):
    ''' Crossmatch two images:
        run SExtractor, convert the pixel positions of the sources into sky
        coordinates, and crossmatch them.
        The output is a dictionary with the objects in common.
    '''
    import lsc
    from astropy.wcs import WCS
    from numpy import array, argmin, min, sqrt

    hd1 = fits.getheader(img1)
    hd2 = fits.getheader(img2)
    wcs1 = WCS(hd1)
    wcs2 = WCS(hd2)

    xpix1, ypix1, fw1, cl1, cm1, ell1, bkg1, fl1 = lsc.lscastrodef.sextractor(img1)
    xpix2, ypix2, fw2, cl2, cm2, ell2, bkg2, fl2 = lsc.lscastrodef.sextractor(img2)
    xpix1, ypix1, xpix2, ypix2 = array(xpix1, float), array(ypix1, float), array(xpix2, float), array(ypix2, float)

    bb = wcs1.wcs_pix2world(list(zip(xpix1, ypix1)), 1)  # transform pixels into coordinates
    xra1, xdec1 = zip(*bb)
    bb = wcs2.wcs_pix2world(list(zip(xpix2, ypix2)), 1)  # transform pixels into coordinates
    xra2, xdec2 = zip(*bb)

    xra1, xdec1, xra2, xdec2 = array(xra1, float), array(xdec1, float), array(xra2, float), array(xdec2, float)
    distvec, pos1, pos2 = lsc.lscastrodef.crossmatch(xra1, xdec1, xra2, xdec2, radius)
    match_dict = {'ra1': xra1[pos1], 'dec1': xdec1[pos1], 'ra2': xra2[pos2], 'dec2': xdec2[pos2], \
                  'xpix1': xpix1[pos1], 'ypix1': ypix1[pos1], 'xpix2': xpix2[pos2], 'ypix2': ypix2[pos2]}
    np.savetxt('substamplist', list(zip(xpix1[pos1], ypix1[pos1])), fmt='%10.10s\t%10.10s')
    return 'substamplist', match_dict
Example #13
def checkast(imglist):
    hdr0 = lsc.util.readhdr(imglist[0])
    # ######  check with sources the accuracy of the astrometry
    wcs = WCS(hdr0)
    xpix, ypix, fw, cl, cm, ell, bkg = lsc.lscastrodef.sextractor(imglist[0])
    pixref = np.array(list(zip(xpix, ypix)), float)
    sky0 = wcs.wcs_pix2world(pixref, 1)
    max_sep = 10
    for img in imglist:
        xsex, ysex, fw, cl, cm, ell, bkg = lsc.lscastrodef.sextractor(img)  # sextractor
        hdr1 = lsc.util.readhdr(img)
        wcs1 = WCS(hdr1)
        pix1 = wcs1.wcs_world2pix(sky0, 1)
        xpix1, ypix1 = zip(*pix1)  # pixel position of the obj in image 0
        xdist, ydist = [], []
        for i in range(len(xpix1)):
            dist = np.sqrt((xpix1[i] - xsex) ** 2 + (ypix1[i] - ysex) ** 2)
            idist = np.argmin(dist)
            if dist[idist] < max_sep:
                xdist.append(xpix1[i] - xsex[idist])
                ydist.append(ypix1[i] - ysex[idist])
        xoff, xstd = round(np.median(xdist), 2), round(np.std(xdist), 2)
        yoff, ystd = round(np.median(ydist), 2), round(np.std(ydist), 2)
        _xdist, _ydist = np.array(xdist), np.array(ydist)
        good = (np.abs(_xdist - xoff) < 3 * xstd) & (np.abs(_ydist - yoff) < 3 * ystd)
        __xdist = _xdist[good]
        __ydist = _ydist[good]
        xoff, xstd = round(np.median(__xdist), 2), round(np.std(__xdist), 2)
        yoff, ystd = round(np.median(__ydist), 2), round(np.std(__ydist), 2)
        if np.isnan(xoff): xoff, xstd = 0, 0
        if np.isnan(yoff): yoff, ystd = 0, 0
        print(xoff, xstd, len(__xdist))
        print(yoff, ystd)
        lsc.updateheader(img, 0, {'CRPIX1': (hdr1['CRPIX1'] - xoff, 'Value at ref. pixel on axis 1')})
        lsc.updateheader(img, 0, {'CRPIX2': (hdr1['CRPIX2'] - yoff, 'Value at ref. pixel on axis 2')})
Example #14
    def empty_like(cls, image, name=None, unit='', fill=0, meta=None):
        """
        Create an empty image like the given image.

        The WCS is copied over, the data array is filled with the ``fill`` value.

        Parameters
        ----------
        image : `~gammapy.image.SkyImage` or `~astropy.io.fits.ImageHDU`
            Instance of `~gammapy.image.SkyImage`.
        fill : float, optional
            Fill image with constant value. Default is 0.
        name : str
            Name of the image.
        unit : str
            String specifying the data units.
        meta : `~collections.OrderedDict`
            Dictionary to store meta data.
        """
        if isinstance(image, SkyImage):
            wcs = image.wcs.copy()
        elif isinstance(image, (fits.ImageHDU, fits.PrimaryHDU)):
            wcs = WCS(image.header)
        else:
            raise TypeError("Can't create image from type {}".format(type(image)))

        data = fill * np.ones_like(image.data)

        header = wcs.to_header()
        if meta is not None:
            header.update(meta)
        return cls(name, data, wcs, unit, meta=header)
Example #15
class ProjectionPywcsNd(_ProjectionSubInterface, ProjectionBase):
    """
    A wrapper for WCS
    """

    def __init__(self, header):
        """
        header could be astropy.io.fits.Header or astropy.wcs.WCS instance
        """

        if isinstance(header, Header):
            self._pywcs = WCS(header=header)
        elif isinstance(header, WCS):
            self._pywcs = header
        else:
            raise ValueError("header must be an instance of "
                             "astropy.io.fits.Header or a WCS object")

        ProjectionBase.__init__(self)

    def _get_ctypes(self):
        return tuple(self._pywcs.wcs.ctype)

    ctypes = property(_get_ctypes)

    def _get_equinox(self):
        return self._pywcs.wcs.equinox

    equinox = property(_get_equinox)

    def _get_naxis(self):
        return self._pywcs.wcs.naxis

    naxis = property(_get_naxis)

    def topixel(self, xy):
        """ 1, 1 base """

        lon_lat = np.array(xy)

        self.fix_lon(lon_lat)

        xy1 = lon_lat.transpose()

        # somehow, wcs_world2pix does not work for some cases
        xy21 = [self._pywcs.wcs_world2pix([xy11], 1)[0] for xy11 in xy1]
        # xy21 = self._pywcs.wcs_world2pix(xy1, 1)

        xy2 = np.array(xy21).transpose()
        return xy2

    def toworld(self, xy):
        """ 1, 1 base """
        xy2 = self._pywcs.wcs_pix2world(np.asarray(xy).T, 1)

        lon_lat = xy2.T
        # fixme
        self.fix_lon(lon_lat)

        return lon_lat
Example #16
def strip_wcs_from_header(header):
    """
    Given a header with WCS information, remove ALL WCS information from that
    header
    """

    hwcs = WCS(header)
    wcsh = hwcs.to_header()

    keys_to_keep = [k for k in header
                    if (k and k not in wcsh and 'NAXIS' not in k)]

    newheader = header.copy()
    for kw in newheader.keys():
        if kw not in keys_to_keep:
            del newheader[kw]

    for kw in ('CRPIX{ii}', 'CRVAL{ii}', 'CDELT{ii}', 'CUNIT{ii}',
               'CTYPE{ii}', 'PC0{ii}_0{jj}', 'CD{ii}_{jj}',):
        for ii in range(5):
            for jj in range(5):
                k = kw.format(ii=ii,jj=jj)
                if k in newheader.keys():
                    del newheader[k]

    return newheader
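A quick check of the helper above; the extra header keywords are invented:

from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
header = w.to_header()
header['OBJECT'] = 'M31'      # non-WCS keywords should survive
header['EXPTIME'] = 300.0

clean = strip_wcs_from_header(header)
print('CRPIX1' in clean, 'OBJECT' in clean)   # False True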
Example #17
    def wcs_sky2pix(self, x, y, origin):
        if self.naxis == 2:
            if self._dimensions[1] < self._dimensions[0]:
                xp, yp = AstropyWCS.wcs_sky2pix(self, y, x, origin)
                return yp, xp
            else:
                return AstropyWCS.wcs_sky2pix(self, x, y, origin)
        else:
            coords = []
            s = 0
            for dim in range(self.naxis):
                if dim == self._dimensions[0]:
                    coords.append(x)
                elif dim == self._dimensions[1]:
                    coords.append(y)
                else:
                    # The following is an approximation, and will break down if
                    # the world coordinate changes significantly over the slice
                    coords.append(np.repeat(self._mean_world[dim], x.shape))
                    s += 1
            coords = np.vstack(coords).transpose()

            # Due to a bug in pywcs, we need to loop over each coordinate
            # result = AstropyWCS.wcs_sky2pix(self, coords, origin)
            result = np.zeros(coords.shape)
            for i in range(result.shape[0]):
                result[i:i + 1, :] = AstropyWCS.wcs_sky2pix(self, coords[i:i + 1, :], origin)

            return result[:, self._dimensions[0]], result[:, self._dimensions[1]]
Example #18
def test_reproject_celestial_3d():
    """
    Test both full_reproject and slicewise reprojection. We use a case where the
    non-celestial slices are the same and therefore where both algorithms can
    work.
    """

    header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))

    array_in = np.ones((3, 200, 180))

    # TODO: here we can check that if we change the order of the dimensions in
    # the WCS, things still work properly

    wcs_in = WCS(header_in)
    wcs_out = wcs_in.deepcopy()
    wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN', wcs_in.wcs.ctype[2]]
    wcs_out.wcs.crval = [158.0501, -21.530282, wcs_in.wcs.crval[2]]
    wcs_out.wcs.crpix = [50., 50., wcs_in.wcs.crpix[2] + 0.5]

    out_full, foot_full = _reproject_full(array_in, wcs_in, wcs_out, (3, 160, 170))

    out_celestial, foot_celestial = _reproject_celestial(array_in, wcs_in, wcs_out, (3, 160, 170))

    np.testing.assert_allclose(out_full, out_celestial)
    np.testing.assert_allclose(foot_full, foot_celestial)
Example #19
def test_reproject_celestial_3d_equ2gal(indep_slices, axis_order):
    """
    Test reprojection of a 3D cube with celestial components, which includes a
    coordinate system conversion (the original header is in equatorial
    coordinates). We test using both the 'fast' method which assumes celestial
    slices are independent, and the 'full' method. We also scramble the input
    dimensions of the data and header to make sure that the reprojection can
    deal with this.
    """

    # Read in the input cube
    hdu_in = fits.open(os.path.join(DATA, 'equatorial_3d.fits'))[0]

    # Define the output header - this should be the same for all versions of
    # this test to make sure we can use a single reference file.
    header_out = hdu_in.header.copy()
    header_out['NAXIS1'] = 10
    header_out['NAXIS2'] = 9
    header_out['CTYPE1'] = 'GLON-SIN'
    header_out['CTYPE2'] = 'GLAT-SIN'
    header_out['CRVAL1'] = 163.16724
    header_out['CRVAL2'] = -15.777405
    header_out['CRPIX1'] = 6
    header_out['CRPIX2'] = 5

    # We now scramble the input axes
    if axis_order != (0, 1, 2):
        wcs_in = WCS(hdu_in.header)
        wcs_in = wcs_in.sub((3 - np.array(axis_order)[::-1]).tolist())
        hdu_in.header = wcs_in.to_header()
        hdu_in.data = np.transpose(hdu_in.data, axis_order)

    array_out, footprint_out = reproject_interp(hdu_in, header_out,
                                                independent_celestial_slices=indep_slices)
    return array_footprint_to_hdulist(array_out, footprint_out, header_out)
Example #20
def LoadFitsSpectrum(filename,hdu=0,indx=0):
    """Load and return the wavelength calibrated input HCT fits spectrum as 2 column numpy array. 
    hdu  : specifies the hdulist to read data and header
    indx  : specifies the column in the data to choose. In HCT, 0 for flux data and 2 for sky """
    fitsfile = fits.open(filename)
    flux = fitsfile[hdu].data  #[indx,0,:]
    w = WCS(fitsfile[hdu].header)
    Size = fitsfile[hdu].header['NAXIS1']
    
    # try :
    #     ref_pixel = fitsfile[hdu].header['CRPIX1']
    #     coord_ref_pixel = fitsfile[hdu].header['CRVAL1']
    #     wave_per_pixel = fitsfile[hdu].header['CDELT1']
    # except KeyError as e :
    #     print('Error: Missing keywords in fits header to do wavelength calibration')
    #     print(e)
    #     print('You might have entered wrong file name. Hence I am raising IOError')
    #     print('Enter the fits file name which is wavelength calibrated.')
    #     raise IOError
    # else:
    #    w_start=coord_ref_pixel - ((ref_pixel-1) * wave_per_pixel)  #Starting wavelength
    #    Wavelengths = w_start+np.arange(len(flux))*wave_per_pixel    


    CoordArray = np.zeros((Size,w.naxis))
    CoordArray[:,0] = np.arange(Size)
    Wavelengths = w.wcs_pix2world(CoordArray,0)[:,0]

    return np.vstack((Wavelengths,flux)).T
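A hypothetical call, assuming a wavelength-calibrated 1-D spectrum in 'spectrum.fits' (the filename is invented):

import matplotlib.pyplot as plt

spec = LoadFitsSpectrum('spectrum.fits')  # column 0: wavelength, column 1: flux
plt.plot(spec[:, 0], spec[:, 1])
plt.xlabel('Wavelength')
plt.ylabel('Flux')
plt.show()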
Example #21
def get_pix_coords(ra=None, dec=None, header=None):

    ''' Ra and dec in (hrs,min,sec) and (deg,arcmin,arcsec), or Ra in degrees
    and dec in degrees.
    '''

    from astropy.wcs import WCS

    # convert to degrees if ra and dec are array-like
    try:
        if len(ra) == 3 and len(dec) == 3:
            ra_deg, dec_deg = hrs2degs(ra=ra, dec=dec)
        else:
            raise ValueError('RA and Dec must be in (hrs,min,sec) and' + \
                    ' (deg,arcmin,arcsec) or in degrees.')
    except TypeError:
        ra_deg, dec_deg = ra, dec

    #wcs_header = pywcs.WCS(header)
    wcs_header = WCS(header)
    #pix_coords = wcs_header.wcs_sky2pix([[ra_deg, dec_deg, 0]], 0)[0]
    pix_coords = wcs_header.wcs_world2pix([[ra_deg, dec_deg],], 0)[0]

    return np.hstack((pix_coords, -1))
Example #22
def check_coverage(ra,dec,hdu):
    #
    # check and return the average coverage in a 3x3 box around the (ra,dec) coordinates
    #
    # Points that fall outside the HDU['coverage'] image return 0.0.
    #
    try:
        cvrg = hdu['coverage']
    except KeyError:
        print("Cannot read the coverage map")
        return 0.0
    #
    wcs = WCS(cvrg.header)
    (ny,nx) = cvrg.data.shape
    (xp,yp) = wcs.wcs_world2pix(ra,dec, 1)
    xp = xp.astype(int)
    yp = yp.astype(int)
    if (yp < 0 or xp < 0 or yp > ny or xp > nx):
        print ("Point outside the input image")
        return 0.0
    #
    # get a 3x3 pixels around each source and take the average coverage
    #
    y0 = max(0,yp - 1)
    y1 = min(ny,yp + 2)
    x0 = max(0,xp - 1)
    x1 = min(nx,xp + 2)
    wcov = cvrg.data[y0:y1,x0:x1]
    return np.mean(wcov)
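A self-contained sketch exercising check_coverage with a synthetic, uniform coverage map (all values invented):

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [150.0, 2.0]
w.wcs.crpix = [25.0, 25.0]
w.wcs.cdelt = [-0.05, 0.05]

cov = fits.ImageHDU(data=np.ones((50, 50)), header=w.to_header(), name='COVERAGE')
hdu = fits.HDUList([fits.PrimaryHDU(), cov])
print(check_coverage(150.0, 2.0, hdu))   # 1.0 on this uniform map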
Example #23
def xy2radec(imfile_or_hdr, x, y, ext=0):
    """ Convert the given x,y pixel position into
    ra,dec sky coordinates (in decimal degrees) for the given image.

    NOTE : this program assumes the input position follows the fits convention,
    with the center of the lower left pixel at (1,1).  The numpy/scipy
    convention sets the center of the lower left pixel at (0,0).

    :param imfile_or_hdr: image filename or astropy.io.fits Header object
    """
    from astropy.io import fits
    from astropy.wcs import WCS

    if isinstance(imfile_or_hdr, str):
        header = fits.getheader(imfile_or_hdr, ext=ext)
    elif isinstance(imfile_or_hdr, fits.Header):
        header = imfile_or_hdr
    else:
        print("WARNING: could not convert x,y to ra,dec for %s" %
              str(imfile_or_hdr))
        return None, None
    # try:
    # alternate WCS construction may be necessary for ACS files ?
    # wcs = WCS(fobj=fobj, header=header)
    # except KeyError:
    wcs = WCS(header=header)
    # fobj.close()
    ra, dec = wcs.all_pix2world(x, y, 1)
    return ra, dec
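A minimal check of xy2radec, passing a Header built in memory instead of a file on disk (values invented):

from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [10.68, 41.27]
w.wcs.crpix = [1.0, 1.0]
w.wcs.cdelt = [-0.0003, 0.0003]

ra, dec = xy2radec(w.to_header(), 1, 1)  # fits convention: (1,1) is the reference pixel here
print(ra, dec)                           # ~10.68 ~41.27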
Example #24
    def setup_class(self):
        self.data = np.arange(20.).reshape(5, 4)
        self.position = SkyCoord('13h11m29.96s -01d19m18.7s', frame='icrs')
        wcs = WCS(naxis=2)
        rho = np.pi / 3.
        scale = 0.05 / 3600.
        wcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)],
                      [scale*np.sin(rho), scale*np.cos(rho)]]
        wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
        wcs.wcs.crval = [self.position.ra.to_value(u.deg),
                         self.position.dec.to_value(u.deg)]
        wcs.wcs.crpix = [3, 3]
        self.wcs = wcs

        # add SIP
        sipwcs = wcs.deepcopy()
        sipwcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
        a = np.array(
            [[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
             [0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
             [-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
             [-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
             [-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
        )
        b = np.array(
            [[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
             [0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
             [6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
             [3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
             [-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
        )
        sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)
        sipwcs.wcs.set()
        self.sipwcs = sipwcs
Example #25
def mapped_column_density_HI(ra, dec, map_name='LAB'):
    """Return the mapped H_I column density at a given position in the sky.

    The value is read from one of the available input maps. Note that the
    data in the maps are stored in Galactic coordinates, while we're
    giving Celestial coordinates in input, here---the transformation is
    handled internally.

    Arguments
    ---------
    ra : float
        Right ascension of the source (in decimal degrees).

    dec: float
        Declination of the source (in decimal degrees).

    map_name : str
        The HI column density map to use. Can be either 'LAB' (LAB survey)
        or 'DL' (Dickey & Lockman).
    """
    # Make sure the map name makes sense.
    assert map_name in ['LAB', 'DL']
    # Transform from Celestial to Galactic coordinates.
    gal_coords = SkyCoord(ra, dec, unit='deg').galactic
    l, b = gal_coords.l.degree, gal_coords.b.degree
    # Open the selected input FITS map and grab the values.
    file_path = os.path.join(XIMPOL_SRCMODEL,'fits','h1_nh_%s.fits' % map_name)
    if not os.path.exists(file_path):
        abort('Could not find %s' % file_path)
    hdu_list = fits.open(file_path)
    _wcs = WCS(hdu_list[0].header)
    _data = hdu_list[0].data
    # wcs returns (x, y) = (column, row)
    col, row = [int(item) for item in _wcs.wcs_world2pix(l, b, 1)]
    return _data[row, col]
Example #26
def edge_detect(im, hdr):
    w = WCS(hdr)
    ra = []
    dec = []
    exclude_RA = np.nan
    exclude_DEC = np.nan
    contours = measure.find_contours(im,0.5,fully_connected='high')
    x_pix = contours[0][:,0]
    y_pix = im.shape[1] - contours[0][:,1] - 1
    exclude_reg = np.array(contours).shape[0] - 1
    if exclude_reg > 0:
        i = 1
        exclude_RA = []
        exclude_DEC = []
        while i <= exclude_reg:
            x_excl = contours[i][:,0]
            y_excl = im.shape[1] - contours[i][:,1] - 1
            tmp_RA = []
            tmp_DEC = []
            for j in np.arange(len(x_excl)):
                x, y = w.wcs_pix2world(y_excl[j], x_excl[j], 0)
                tmp_RA.append(x.tolist())
                tmp_DEC.append(y.tolist())
            exclude_RA.append(tmp_RA)
            exclude_DEC.append(tmp_DEC)
            i += 1
    for i in np.arange(len(x_pix)):
        x, y = w.wcs_pix2world(y_pix[i], x_pix[i], 0)
        ra.append(x.tolist())
        dec.append(y.tolist())

    return ra, dec, exclude_RA, exclude_DEC
Example #27
 def make_astrometry_map(self,outname,factor):
     # factor tells us how much bigger than cellsize the pixels will be
     hdus=fits.open(self.imroot+'.app.restored.fits')
     _,_,yd,xd=hdus[0].data.shape
     yd//=factor
     xd//=factor
     hdus[0].header['CDELT1']*=factor
     hdus[0].header['CDELT2']*=factor
     hdus[0].header['CRPIX1']/=factor
     hdus[0].header['CRPIX2']/=factor
     w=WCS(hdus[0].header)
     rmap=np.ones((1,1,yd,xd))*np.nan
     # this would be faster with use of e.g. PIL
     for y in range(yd):
         print('.', end=' ')
         sys.stdout.flush()
         xv=np.array(range(xd))
         yv=y*np.ones_like(xv)
         ra,dec,_,_=w.wcs_pix2world(xv,yv,0,0,0)
         dra,ddec=self.r.coordconv(ra,dec)[1]
         for i,x in enumerate(xv):
             number=self.r.which_poly(dra[i],ddec[i],convert=False)
             if number is not None:
                 direction=self.pli[number]
                 rmap[0,0,y,x]=np.sqrt(self.rae[direction,2]**2.0+self.dece[direction,2]**2.0)
     print()
     hdus[0].data=rmap
     hdus.writeto(outname,overwrite=True)
Example #28
    def setup_class(self):

        # make a fake header to test the helper functions which access the header
        w = WCS(naxis=2)

        w.wcs.crpix = [crpix_val, crpix_val]
        w.wcs.cdelt = np.array([-cdelt_val, cdelt_val])
        w.wcs.crval = [cr1val_val, cr2val_val]
        w.wcs.ctype = [b"RA---TAN", b"DEC--TAN"]
        w.wcs.crota = [0, crota2_val]

        self.header = w.to_header()

        # make a temporary directory for the input and output
        self.tmpdir = tempfile.mkdtemp()

        # get the test data and copy it to the temp directory
        if os.path.exists('../data/testimgs'): # copy from ../data/testimgs if that exists 
            shutil.copytree('../data/testimgs',self.tmpdir+'/imagecubetest')
        else: # download and symlink to temp directory: NOT WORKING
            os.makedirs(self.tmpdir+'/imagecubetest/')
            for fname in test_data_files:
                tmpname = download_file(test_data_loc+fname)
                linked_name = self.tmpdir+'/imagecubetest/'+fname
                shutil.copy2(tmpname, linked_name)
Example #29
def get_frames_with_target(myfile, ra, dec, debug=False):
    
    hdulist = pf.open(myfile)
    if len(hdulist)>1:
        indices = np.arange(len(hdulist)-1)+1
    else:
        indices = np.array([0])
        
    frames = []
    for i in indices:
    
        prihdr = hdulist[i].header
        img = hdulist[i].data * 1.
        
        ny, nx = img.shape
        if (ra * dec != 0):

            # Get pixel coordinates of SN
            wcs = WCS(prihdr)
            try:
                # wcs_sky2pix was the old pywcs name; astropy uses wcs_world2pix
                target_pix = wcs.wcs_world2pix([np.array([ra, dec], float)], 1)[0]
            except Exception:
                print("ERROR when converting sky to pixels. Is astrometry in place? Default coordinates assigned.")
                target_pix = [+nx/2., ny/2.]

            if debug: print (i, target_pix) 
        else:
            target_pix = [+nx/2., ny/2.]
        
        if (target_pix[0] > 0 and target_pix[0]<nx) and (target_pix[1] > 0 and target_pix[1]<ny):
            frames.append(i)
            
    return np.array(frames)
Example #30
def split1d(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d????.fits')
    if len(fs) == 0:
        print "WARNING: No extracted spectra to split."
        iraf.cd('..')
        return

    for f in fs:
        hdu = pyfits.open(f.replace('x1d', 'fix'))
        chipgaps = get_chipgaps(hdu)
        # Throw away the first pixel as it is almost always bad
        chipedges = [[1, chipgaps[0][0]], [chipgaps[0][1] + 1, chipgaps[1][0]],
                     [chipgaps[1][1] + 1, chipgaps[2][0]]]

        w = WCS(f)
        # Copy each of the chips out separately. Note that iraf is 1-indexed,
        # unlike python, so we add 1
        for i in range(3):
            # get the wavelengths that correspond to each chip
            lam, _apnum, _bandnum = w.all_pix2world(chipedges[i], 0, 0, 0)
            iraf.scopy(f, f[:-5] + 'c%i' % (i + 1), w1=lam[0], w2=lam[1],
                       format='multispec', rebin='no',clobber='yes')
        hdu.close()
    iraf.cd('..')
Example #31
from astropy.wcs import WCS

from .transforms import (WCSPixel2WorldTransform, WCSWorld2PixelTransform,
                         CoordinateTransform)
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta
from .wcs_utils import wcs_to_celestial_frame
from .frame import RectangularFrame
import numpy as np

__all__ = ['WCSAxes', 'WCSAxesSubplot']

VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']

IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [1., 1.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]


class WCSAxes(Axes):

    def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
                 transData=None, slices=None, frame_class=RectangularFrame,
                 **kwargs):

        super(WCSAxes, self).__init__(fig, rect, **kwargs)
        self._bboxes = []
Example #32
def reject_sources(name,
                   catname,
                   datname,
                   min_snr=5,
                   max_size=None,
                   max_size_ID=None,
                   flux_type='peak'):

    #catname: name of catalog of sources, with required columns 'gauss_x_'+name, 'gauss_y_'+name, 'FWHM_major_'+name, 'FWHM_minor_'+name, and 'position_angle_'+name
    #datname: name of data fits image
    #min_snr: minimum SNR value, all sources with SNR below this will be rejected
    #max_size: maximum major axis radius for ellipse around source, in sigma
    #max_size_ID: alternatively, give the index of a source to set that source's radius to the maximum
    #flux_type: if 'peak', chooses the brightest pixel, if 'percentile', flux measured by average of top 10% of pixels

    catalog = fits.getdata(catname)
    catalog = Table(catalog)

    bad_inds = np.where(np.isnan(catalog['ap_flux_' + name]) == True)
    catalog.remove_rows(bad_inds)

    fl = fits.open(datname)
    data = fl[0].data.squeeze()
    header = fl[0].header
    mywcs = WCS(header).celestial

    sigma_to_FWHM = 2 * np.sqrt(2 * np.log(2))

    pixel_scale = np.abs(mywcs.pixel_scale_matrix.diagonal().prod()
                         )**0.5 * u.deg  #for conversion to pixels

    snr_vals = []
    cutout_images = []
    masks = []
    bg_arr = []
    bg_arr2 = []
    reject = np.full(len(catalog), False)

    for i in range(len(catalog)):
        x_cen = catalog['gauss_x_' + name][i] * u.deg
        y_cen = catalog['gauss_y_' + name][i] * u.deg
        major_fwhm = (catalog['FWHM_major_' + name][i] * u.arcsec).to(u.degree)
        minor_fwhm = (catalog['FWHM_minor_' + name][i] * u.arcsec).to(u.degree)
        position_angle = catalog['position_angle_' + name][i] * u.deg

        annulus_width = 15
        center_pad = 10  #pad between ellipse and inner radius
        # Define some ellipse properties in pixel coordinates
        position = SkyCoord(x_cen, y_cen, frame='icrs', unit=(u.deg, u.deg))
        pix_position = np.array(position.to_pixel(mywcs))

        pix_major_fwhm = major_fwhm / pixel_scale
        pix_minor_fwhm = minor_fwhm / pixel_scale

        # Cutout section of the image we care about, to speed up computation time
        size = (
            (center_pad + annulus_width) * pixel_scale + major_fwhm
        ) * 2.2  #2.2 is arbitrary to get entire annulus and a little extra
        cutout = Cutout2D(data, position, size, mywcs,
                          mode='partial')  #cutout of outer circle
        cutout_center = regions.PixCoord(
            cutout.center_cutout[0],
            cutout.center_cutout[1])  #center of the cutout in pixel coords

        # Define the aperture regions needed for SNR
        ellipse_reg = regions.EllipsePixelRegion(cutout_center,
                                                 pix_major_fwhm * 2.,
                                                 pix_minor_fwhm * 2.,
                                                 angle=position_angle)
        innerann_reg = regions.CirclePixelRegion(cutout_center,
                                                 center_pad + pix_major_fwhm)
        outerann_reg = regions.CirclePixelRegion(
            cutout_center, center_pad + pix_major_fwhm + annulus_width)

        # Make masks from aperture regions
        ellipse_mask = mask(ellipse_reg, cutout)
        annulus_mask = mask(outerann_reg, cutout) - mask(innerann_reg, cutout)

        # Calculate the SNR and aperture flux sums
        pixels_in_annulus = cutout.data[annulus_mask.astype(
            'bool')]  #pixels within annulus
        pixels_in_ellipse = cutout.data[ellipse_mask.astype(
            'bool')]  #pixels in ellipse
        bg_rms = rms(pixels_in_annulus)
        bg_mean = np.mean(pixels_in_annulus)
        bg_median = np.median(pixels_in_annulus)

        if flux_type == 'peak':
            peak_flux = catalog['peak_flux_' + name][i]
        if flux_type == 'percentile':
            top_percent = np.nanpercentile(pixels_in_ellipse, 90)
            peak_flux = np.mean(
                pixels_in_ellipse[pixels_in_ellipse > top_percent])
        snr = peak_flux / bg_rms
        catalog['ap_flux_err_' + name][i] = bg_rms
        bg_arr.append(bg_mean)
        bg_arr2.append(bg_median)

        if snr < min_snr:  #if low snr, reject
            reject[i] = True
        if max_size_ID is not None:
            if catalog['major_sigma'][i] > catalog['major_sigma'][
                    max_size_ID] + 0.01 / 3600:  #if too big a source, reject
                reject[i] = True
        if max_size is not None:
            if catalog['major_sigma'][
                    i] > max_size:  #if too big a source, reject
                reject[i] = True
        snr_vals.append(snr)
        cutout_images.append(cutout.data)
        masks.append(ellipse_mask + annulus_mask)

    catalog['bg_mean_' + name] = bg_arr
    catalog['bg_median_' + name] = bg_arr2
    plot_grid(cutout_images, masks, reject, snr_vals, catalog['_idx_' + name])
    plt.show(block=False)
    line_remove = input(
        'enter id values for sources to exclude from the catalog, separated by whitespace: '
    )
    man_input_rem = np.array(line_remove.split(), dtype='int')
    id_ind_true = np.where(
        np.in1d(catalog['_idx_' + name], man_input_rem) == True)
    reject[id_ind_true] = True

    line_keep = input(
        'enter id values for rejected sources to keep in the catalog, separated by whitespace: '
    )
    man_input_keep = np.array(line_keep.split(), dtype='int')
    id_ind_false = np.where(
        np.in1d(catalog['_idx_' + name], man_input_keep) == True)
    reject[id_ind_false] = False

    rej_ind = np.where(reject == True)
    catalog.remove_rows(rej_ind)
    return catalog
Example #33
                                   1,
                                   wcs='logical',
                                   logfile='tmp.log',
                                   keeplog=True)
                    xytargets = iraf.fields('tmp.log', '1,2', Stdout=1)
                    _xpos, _ypos = xytargets[0].split()[0], xytargets[0].split()[1]
                elif not _ra or not _dec:
                    print('use ra and dec from input database !!!')
                    _ra, _dec, _SN0, _type = lsc.util.checksndb(
                        img0, 'targets')

                if _ra and _dec:
                    print('convert RA, dec to xpos, ypos using header')
                    hdr0 = lsc.util.readhdr(img0)
                    wcs = WCS(hdr0)
                    pix1 = wcs.wcs_world2pix([(_ra, _dec)], 1)
                    _xpos, _ypos = pix1[0][0], pix1[0][1]
                elif _mag != 0:
                    sys.exit('need to define coordinates for subtraction')

            if goon:
                print('pixel coordinates to subtract:', _xpos, _ypos)
                print(img0, psfimg)
                imgout = re.sub('.fits', '.temp.fits', img0.split('/')[-1])
                lsc.util.delete('_tmp.fits,_tmp2.fits,_tmp2.fits.art,' +
                                imgout)
                _targetid = lsc.mysqldef.targimg(img0)
                if _clean:
                    if os.path.isfile(re.sub('.fits', '.clean.fits', img0)):
Example #34
 if fitsFile[0].header['PRIMESI'] == 'WFC3':
     fov_image = fitsFile[1].data  # check the back grounp
     header = fitsFile[1].header  # if the target position is given in WCS coordinates, the header must carry the WCS information
     # wht = fitsFile[2].data # The WHT map
     # exp =  astro_tools.read_fits_exp(fitsFile[0].header)  #Read the exposure time
     # pixel_scale = astro_tools.read_pixel_scale(fitsFile[1].header)  #Read pixel scale
     # mean_wht = exp * (pixel_scale/0.135)**2
     # exp_map = exp * wht/mean_wht
     # data_process = DataProcess(fov_image = fov_image, target_pos = [RA, Dec], pos_type = 'wcs', header = header,
     #                       rm_bkglight = True, exptime = exp_map, if_plot=False, zp = 27.0)
     # try:
     #     data_process.generate_target_materials(radius=25, create_mask = False, if_plot=True)
     # except:
     #     print()
     wcs = WCS(header)
     target_pos = wcs.all_world2pix([[RA, Dec]], 1)[0]
     target_pos = target_pos.astype(int)  # np.int0 was removed in numpy 2.0
     if target_pos[0] > 0 and target_pos[1] > 0:
         target_stamp = cutout(image=fov_image,
                               center=target_pos,
                               radius=45)
         target_stamp[np.isnan(target_stamp)] = 0
         plt.imshow(target_stamp - target_stamp.min(),
                    norm=LogNorm(),
                    cmap='gist_heat',
                    vmax=target_stamp.max(),
                    vmin=1.e-4,
                    origin='lower')
         plt.colorbar()
         plt.close()
Example #35
LC = fits.open(
    os.path.join(filedir,
                 "hlsp_everest_k2_llc_201920032-c01_kepler_v2.0_lc.fits"))
pf = fits.open("ktwo201920032-c01_lpd-targ.fits")
mask = np.where(LC[1].data['QUALITY'] == 0)
t = LC[1].data['TIME'][mask]
timemodel = np.linspace(t.min(), t.max(), t.shape[0])
LC_Model = i_o.model(ini, timemodel)
plt.scatter(timemodel, LC_Model, marker='.')
it = i_o.in_transit_range(timemodel, LC_Model, ini)
ot = i_o.out_transit_range(timemodel, it, ini)
img_i = diffimg.add_img(
    "hlsp_everest_k2_llc_201920032-c01_kepler_v2.0_lc.fits", it)
img_o = diffimg.add_img(
    "hlsp_everest_k2_llc_201920032-c01_kepler_v2.0_lc.fits", ot)
wcs = WCS(LC[3].header, key='P')
diff_img = diffimg.img_in_apr(LC[3].data, img_o - img_i)
diff_img_i = diffimg.img_in_apr(LC[3].data, img_i)
diff_img_o = diffimg.img_in_apr(LC[3].data, img_o)
unc = diffimg.unc_img(pf[1].data['FLUX_ERR'][mask], LC[3].data,
                      pf[1].data['TIME'][mask], np.vstack((it, ot)))
unc_i = diffimg.unc_img(pf[1].data['FLUX_ERR'][mask], LC[3].data,
                        pf[1].data['TIME'][mask], it)
unc_o = diffimg.unc_img(pf[1].data['FLUX_ERR'][mask], LC[3].data,
                        pf[1].data['TIME'][mask], ot)
#ax = plt.subplot(111, projection=wcs)
#ax.imshow(diff_img)
#ax.scatter(9.870969162523352-1,16-7.849627832013198-1,marker='x',color='white')

#centroid calculation from out-transit & diff imgage
wcs2 = WCS(LC[3].header)
Example #36
mwa.elevation = 377.827 #from sea level


waterfall=np.zeros((90, 768))

for timeStep in range(90):
	print("working on timestep " +str(timeStep))
	for f in range(768):
		print("Frequency channel " + str(f))
		hud1 = fits.open('1142521608-2m-' + str(timeStep) + '-' + str(f).zfill(4)+ '-image.fits')
		hud2 = fits.open('1142521608-2m-' + str(timeStep+1) + '-' + str(f).zfill(4)+ '-image.fits')

		header1 = hud1[0].header
		header2 = hud2[0].header
		
		wcs1 = WCS(header1, naxis=2)
		wcs2 = WCS(header2, naxis=2)

		UTCTime1 = datetime.strptime(header1['DATE-OBS'], '%Y-%m-%dT%H:%M:%S.%f') + timedelta(seconds=1)
		UTCTime2 = datetime.strptime(header2['DATE-OBS'], '%Y-%m-%dT%H:%M:%S.%f') + timedelta(seconds=1.25)
		

		#The below section calculates the position of the satellite in the top image in the cooridnate system of the top image

		mwa.date = UTCTime1
		sat.compute(mwa)
		xy1 = wcs2.all_world2pix([[np.degrees(sat.ra.real), np.degrees(sat.dec.real)]], 1)[0]
		x1 = int(np.floor(xy1[0]))
		y1 = int(np.floor(xy1[1]))

Example #37
    #   sys.exit(0)
    #    print(radec_deg)
    #    print(rmag)
    date = fits_root.split('@', -1)[0].split('-', -1)[-1]
    year = date[0:4]
    month = date[4:6]
    day = date[6:8]
    yearmonth = date[0:6]
    #   sys.exit(0)
    dir_file = yearmonth + '/slt' + date + '_calib_sci/'
    #    dir_reg=yearmonth+'/slt'+date+'_reg/'
    hdu = fits.open(dir_file + fits_calib)[0]
    imhead = hdu.header
    imdata = hdu.data
    wcs = WCS(imhead)
    #    print(wcs)

    calendar_date[k] = julian.from_jd(JD[i], fmt='jd')
    print('calendar_date', calendar_date[k])

    #    radec_pix=wcs.all_world2pix(ra_deg,dec_deg,1)
    ra_pix, dec_pix = wcs.all_world2pix(ra_deg, dec_deg, 1)
    ra_pix = ra_pix.tolist()
    dec_pix = dec_pix.tolist()
    #    print(ra_pix,dec_pix)
    #    print()

    #    mask=np.array

    #    background level and error
Example #38
obj = objList[i]
cube_address = fitsFolder / fileList[i]
objFolder = resultsFolder / obj
voxelFolder = resultsFolder / obj / 'voxel_data'
db_address = objFolder / f'{obj}_database.fits'
mask_address = dataFolder / obj / f'{obj}_mask.txt'

# Declare voxels to analyse
flux6563_image = fits.getdata(db_address, 'H1_6563A_flux', ver=1)
flux6563_levels = np.nanpercentile(flux6563_image, pertil_array)
hdr_plot = fits.getheader(db_address, extname='PlotConf')
mask_data = fits.getdata(db_address, 'region_3', ver=1)
mask_array = np.ma.masked_array(mask_data, mask=mask_data)

fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(projection=WCS(hdr_plot), slices=('x', 'y', 1))
ax.update({
    'title': r'{} galaxy, $H\alpha$ flux'.format(obj),
    'xlabel': r'RA',
    'ylabel': r'DEC'
})

halpha_cmap = cm.gray
halpha_cmap.set_under(background_color)
im = ax.imshow(flux6563_image,
               interpolation='bicubic',
               cmap=halpha_cmap,
               norm=colors.SymLogNorm(linthresh=flux6563_levels[-2],
                                      vmin=flux6563_levels[-3],
                                      base=10))
# plt.savefig(folder/'muse_CGCG007_Halpha.png', resolution=300, bbox_inches='tight')
Example #39
    def from_fits_images(images,
                         position,
                         size=(10, 10),
                         extension=None,
                         target_id="unnamed-target",
                         **kwargs):
        """Creates a new Target Pixel File from a set of images.

        This method is intended to make it easy to cut out targets from
        Kepler/K2 "superstamp" regions or TESS FFI images.

        Parameters
        ----------
        images : list of str, or list of fits.ImageHDU objects
            Sorted list of FITS filename paths or ImageHDU objects to get
            the data from.
        position : astropy.SkyCoord
            Position around which to cut out pixels.
        size : (int, int)
            Dimensions (cols, rows) to cut out around `position`.
        extension : int or str
            If `images` is a list of filenames, provide the extension number
            or name to use. Default: 0.
        target_id : int or str
            Unique identifier of the target to be recorded in the TPF.
        **kwargs : dict
            Extra arguments to be passed to the `KeplerTargetPixelFile` constructor.

        Returns
        -------
        tpf : KeplerTargetPixelFile
            A new Target Pixel File assembled from the images.
        """
        if extension is None:
            if isinstance(images[0], str) and images[0].endswith("ffic.fits"):
                extension = 1  # TESS FFIs have the image data in extension #1
            else:
                extension = 0  # Default is to use the primary HDU

        factory = KeplerTargetPixelFileFactory(n_cadences=len(images),
                                               n_rows=size[0],
                                               n_cols=size[1],
                                               target_id=target_id)
        for idx, img in tqdm(enumerate(images), total=len(images)):
            if isinstance(img, fits.ImageHDU):
                hdu = img
            elif isinstance(img, fits.HDUList):
                hdu = img[extension]
            else:
                hdu = fits.open(img)[extension]
            if idx == 0:  # Get default keyword values from the first image
                factory.keywords = hdu.header
            cutout = Cutout2D(hdu.data,
                              position,
                              wcs=WCS(hdu.header),
                              size=size,
                              mode='partial')
            factory.add_cadence(frameno=idx,
                                flux=cutout.data,
                                header=hdu.header)
        return factory.get_tpf(**kwargs)
Example #40
width = 0.2  # in mags
# dm = 21.69

###################
g_m_iso, i_m_iso = np.loadtxt(iso_filename, usecols=(0, 1), unpack=True)
g_ierr, ix, iy, i_ierr, g_mag, i_mag, gmi = np.loadtxt(photcalib_filename,
                                                       usecols=(5, 6, 7, 10,
                                                                11, 12, 13),
                                                       unpack=True)
gmi_err = np.sqrt(g_ierr**2 + i_ierr**2)

# if you get an error about loading the WCS, uncomment the following lines to
# delete the pipeline WCS keywords from the header
# from pyraf import iraf
# iraf.imutil.hedit(images=wcs_source_image_filename, fields='PV*', delete='yes', verify='no')
w = WCS(wcs_source_image_filename)
ra, dec = w.all_pix2world(ix, iy, 1)

# i_m_iso = i_iso + dm
gi_iso = g_m_iso - i_m_iso

colors_left = gi_iso - width / 2.0
colors_right = gi_iso + width / 2.0

colors = np.concatenate((colors_left, np.flipud(colors_right)))
mags = np.concatenate((i_m_iso, np.flipud(i_m_iso)))

verts = list(zip(colors, mags))  # set up the Path necessary for testing membership; list() for Python 3
cm_filter = Path(verts)

stars_f = np.empty_like(gmi, dtype=bool)
Example #41
    def __init__(self,
                 sigmas,
                 header,
                 wavelengths,
                 alphas,
                 aper_corr=1.0,
                 nsigma=1.0,
                 flim_model=None,
                 mask=None,
                 cache_sim_interp=True,
                 verbose=False):

        if mask is not None:
            mask = logical_not(mask)
            mask3d = repeat(mask[newaxis, :, :], sigmas.shape[0], axis=0)
            self.sigmas = maskedarray(sigmas / nsigma,
                                      mask=mask3d,
                                      fill_value=999.0)
        else:
            self.sigmas = maskedarray(sigmas / nsigma, fill_value=999.0)

        # collapse the data to create a continuum mask
        self.collapsed_data = filled(self.sigmas, 0).sum(axis=0)

        self.nsigma = nsigma

        # Grab the flux limit model
        self.f50_from_noise, self.sinterp, interp_sigmas \
                                       = return_flux_limit_model(flim_model,
                                                                 cache_sim_interp=cache_sim_interp,
                                                                 verbose = verbose)

        self.sigma_interpolate = None
        if interp_sigmas:
            indicesz = arange(self.sigmas.shape[0])
            indicesy = arange(self.sigmas.shape[1])
            indicesx = arange(self.sigmas.shape[2])

            self.sigma_interpolate = RegularGridInterpolator(
                (indicesz, indicesy, indicesx),
                self.sigmas.filled(fill_value=nan),
                fill_value=999)

        # Fix issue with header
        if not "CD3_3" in header:
            header["CD3_3"] = header["CDELT3"]
            header["CD3_1"] = 0.0
            header["CD3_2"] = 0.0
            header["CD2_3"] = 0.0
            header["CD1_3"] = 0.0

        self.wcs = WCS(header)
        self.header = header

        # Deal with aperture corrections
        if aper_corr:
            self.aper_corr = aper_corr
        elif "APCOR" in self.header:
            self.aper_corr = self.header["APCOR"]
        elif "APCOR0" in self.header:
            self.aper_corr = self.header["APCOR0"]
        else:
            self.aper_corr = 1.0

        self.sigmas = self.sigmas * self.aper_corr

        self.alphas = array(alphas)
        self.wavelengths = wavelengths

        # Depends if alphas depend on wavelength or
        # is specified per cube cell
        if len(self.alphas.shape) == 3:
            self.alpha_is_cube = True
        else:
            self.alpha_is_cube = False
            self.alpha_func = interp1d(wavelengths,
                                       alphas,
                                       fill_value="extrapolate")
Example #42
        rastr = kludgefile.read(19)
        kludgefile.read(1)  # skip the CR between the two fields
        decstr = kludgefile.read(19)
        if debug:
            print("Kludge coords: ", rastr, " ", decstr)
        kludgefile.close()
        solveRa = float(rastr)
        solveDec = float(decstr)

        if debug:
            print("Solved RA= ", solveRa, " Dec=", solveDec)

        # Load the ccd image FITS hdulist using astropy.io.fits
        with fits.open('solve.fits', mode='readonly',
                       ignore_missing_end=True) as fitsfile:
            w = WCS(fitsfile[0].header)
            ccdRa = w.wcs.crval[0]
            ccdDec = w.wcs.crval[1]
        if debug:
            print("CCD RA= ", ccdRa, " Dec=", ccdDec)

        # Compare the plate solve to the current RA/DEC, convert to arcsecs
        deltaRa = (solveRa - ccdRa) * 60 * 60
        deltaDec = (solveDec - ccdDec) * 60 * 60
        if debug:
            print("Delta RA= ", deltaRa, " Delta Dec=", deltaDec)

        # If the offset is within the threshold in arcsec, ignore it and continue
        if (abs(deltaRa) < 5) and (abs(deltaDec) < 5):
            if debug:
                print("Deviation < 5 arcsec, ignoring")
Example #43
        resolution_squared = resolution**2

        instrmagsky = data / (hdu[0].header['EXPTIME'] * resolution_squared)
        log_instrmagsky = np.log10(instrmagsky)
        log_instrmagsky_error = 0.434 * (
            (np.sqrt(data)) *
            (1 /
             (hdu[0].header['EXPTIME'] * resolution_squared))) / instrmagsky
        magnitude_sky = C - 2.5 * log_instrmagsky
        magnitude_sky_error = np.sqrt((C_error)**2 +
                                      (2.5 * log_instrmagsky_error)**2)

        #    3c) Find the altitude and azimuth associated with each pixel.
        #        See near the end of lightdome_photometry for how this is done
        #        using cAltAz = c.transform_to(coords.AltAz(obstime = time, location = loc))
        wcs = WCS(hdu[0].header)
        dirname = 'lightdome_timpanogos'  # Just hard-code for now.
        metad = read_metadata(dirname)
        loc = coords.EarthLocation(lat=metad['lat'] * u.deg,
                                   lon=metad['lon'] * u.deg,
                                   height=metad['elev'] * u.m)
        time = Time(hdu[0].header['DATE-OBS'], scale='utc')

        xlist = np.arange(0, len(magnitude_sky[:, 0]))
        ylist = np.arange(0, len(magnitude_sky[0, :]))
        xarr, yarr = np.meshgrid(xlist, ylist, indexing='ij')

        # Plot the image, with colorbar.
        norm = mpl.colors.Normalize(vmin=np.min(magnitude_sky),
                                    vmax=np.max(magnitude_sky))
        plt.clf()
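Following the step-3c comment above, here is a hedged sketch of mapping every pixel through the WCS to altitude and azimuth, reusing the `wcs`, `time`, and `loc` objects already built (whether `xarr` or `yarr` corresponds to the FITS x axis depends on the image orientation, so treat the argument order as an assumption):

import astropy.units as u

# pixel grid -> RA/Dec; all_pix2world expects (x, y) in FITS order,
# origin=0 for 0-based numpy indices
ra, dec = wcs.all_pix2world(yarr.ravel(), xarr.ravel(), 0)
c = coords.SkyCoord(ra=ra * u.deg, dec=dec * u.deg)
cAltAz = c.transform_to(coords.AltAz(obstime=time, location=loc))
alt = cAltAz.alt.deg.reshape(xarr.shape)  # altitude of each pixel, degrees
az = cAltAz.az.deg.reshape(xarr.shape)    # azimuth of each pixel, degrees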
Example #44
            image_reproject_from_healpix_to_file(fits.open(map_path)[map_hdu],
                                                 target_image_hdu_header,
                                                 filepath=filepath)
        else:
            array, footprint = image_reproject_from_healpix_to_file(
                fits.open(map_path)[map_hdu],
                target_image_hdu_header,
                filepath=None)
            return array, footprint
    else:
        return None, None


# -------------------------------------------------------------------------
if __name__ == '__main__':
    target_header = fits.open(
        '/pool/mmt/2015b/wise/ngc_663/w1/mosaic_bm/mosaic.fits')[0].header
    target_wcs = WCS(target_header)
    # haslam408 = fits.open('/pool/maps/LAMBDA/haslam408/haslam408_dsds_Remazeilles2014_ns2048.fits')[1]
    # haslam408 = fits.open('/pool/maps/LAMBDA/IRIS/IRIS_nohole_1_2048.fits')[1]
    # hdu_in = fits.open('/pool/MMT/2015b/iras/b1/mosaic16/mosaic.fits')[0]
    # array, footprint = image_reproject_wcs_to_file(hdu_in, target_header)
    array_, footprint_ = image_query('planck_857', target_header)
    fig = plt.figure()
    ax1 = plt.subplot(1, 1, 1, projection=target_wcs)
    ax1.imshow(array_, origin='lower', vmin=1, vmax=5000)
    ax1.coords.grid(color='white')
    ax1.coords['ra'].set_axislabel('Right Ascension')
    ax1.coords['dec'].set_axislabel('Declination')
    fig.canvas.draw()
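`image_reproject_from_healpix_to_file` appears to wrap the `reproject` package; for reference, a minimal sketch of the underlying call (the HEALPix file path is a placeholder, not from the original script):

from astropy.io import fits
from reproject import reproject_from_healpix

hdu_in = fits.open('healpix_map.fits')[1]  # placeholder HEALPix table HDU
array, footprint = reproject_from_healpix(hdu_in, target_header)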
Example #45
def makeMovie(workingdir,
              cube,
              name,
              redshift,
              center,
              numframes=30,
              scalefactor=2.0,
              cmap=cm.plasma,
              background_color='black',
              thresh=None,
              logscale=False,
              contsub=False):
    '''Make the movie'''

    ########### READ THE DATA CUBE ####################
    hdulist = fits.open(cube)
    data = hdulist[1].data
    header = hdulist[1].header
    wcs_3d = WCS(header)
    #wcs = wcs_3d
    wcs = wcs_3d.dropaxis(2)
    hdulist.close()

    number_of_channels = len(data[:, 0, 0])

    # Create the wavelength array
    wavelength = ((np.arange(number_of_channels) + 1.0) -
                  header['CRPIX3']) * header['CD3_3'] + header['CRVAL3']

    # This quick one liner finds the element in the wavelength array
    # that is closest to the "target" wavelength, then returns that element's
    # index.

    # It finds the deviation between each array element and the target value,
    # takes its absolute value, and then returns the index of
    # the element with the smallest value in the resulting array.
    # This is the number that is closest to the target.

    center_channel = (np.abs(wavelength - center)).argmin()
    #print("Emission line centroid for {} is in channel {}".format(name,center_channel))

    movie_start = center_channel - numframes
    movie_end = center_channel + numframes

    slices_of_interest = np.arange(movie_start, movie_end, 1)

    ########### CREATE AND SCRUB THE TEMPORARY FRAMESTORE ##############
    temp_movie_dir = workingdir + "framestore/"
    if not os.path.exists(temp_movie_dir):
        os.makedirs(temp_movie_dir)
        print(
            "Created a temporary directory called '{}', where movie frame .png files are stored. You can delete this afterward if you'd like."
            .format(temp_movie_dir))

    png_files = []

    # Clean the temporary movie directory first
    # If you don't remove all "old" movie frames, your gif is gonna be messed up.
    for f in glob.glob(temp_movie_dir + "*.png"):
        os.remove(f)
    #####################################################################

    print("\nMaking movie for {} at z={}. Line centroid is in channel {}.".
          format(name, round(redshift, 3), center_channel))

    for i, channel in enumerate(slices_of_interest):

        if contsub:
            # Perform a dumb continuum subtraction. Risky if you land on another line.
            cont_sub_image = data[channel, :, :] - data[center_channel -
                                                        200, :, :]
            cont_sub_image[cont_sub_image < 0.005] = np.nan
            image = cont_sub_image
        else:
            image = data[channel, :, :]

        if thresh is not None:
            image[image < thresh] = np.nan

        sizes = np.shape(image)
        # Scale up the image by the scalefactor - higher means a larger GIF movie, in MB and inches.
        height = float(sizes[0]) * scalefactor
        width = float(sizes[1]) * scalefactor

        fig = plt.figure()
        fig.set_size_inches(width / height, 1, forward=False)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)

        # Set the background color (usually black or white, depending on the cmap)
        cmap.set_bad(background_color, 1)

        if logscale:
            ax.imshow(image,
                      origin='lower',
                      norm=LogNorm(),
                      cmap=cmap,
                      interpolation='None')
        else:
            ax.imshow(image, origin='lower', cmap=cmap, interpolation='None')

        fig.subplots_adjust(bottom=0)
        fig.subplots_adjust(top=1)
        fig.subplots_adjust(right=1)
        fig.subplots_adjust(left=0)

        fig.savefig(temp_movie_dir + '{}'.format(i) + '.png', dpi=height)
        png_files.append(temp_movie_dir + '{}'.format(i) + '.png')
        plt.close(fig)

    # Create and scrub the GIF directory
    gif_output_dir = workingdir + "movies/"
    if not os.path.exists(gif_output_dir):
        os.makedirs(gif_output_dir)
        print("Saving output movies to '{}'.".format(gif_output_dir))

    # Set the GIF filename, checking the same sanitized name for collisions
    # that will actually be written
    safe_name = name.replace(' ', '-')
    i = 0

    # Check if that gif name already exists:
    while os.path.exists(gif_output_dir + '{}_{}.gif'.format(safe_name, i)):
        i += 1

    gif_name = gif_output_dir + '{}_{}.gif'.format(safe_name, i)

    gif_frames = []
    for filename in png_files:
        gif_frames.append(imageio.imread(filename))

    imageio.mimsave(gif_name, gif_frames)
    print("Done. Saved to {}.".format(gif_name))
Example #46
class SensitivityCube(object):
    """
    Deals with flux limit cubes

    Parameters
    ----------
    sigmas : array
        3D datacube of datascale/noise
        where noise is the noise on
        a point source detection
    header : dict
        a dictionary of the header values to be stored in a
        FITS file
    wavelengths, alphas : array
        arrays of the wavelength in
        Angstrom and the alpha parameter
        of the Fleming+ 1995 function
    aper_corr : float (Optional)
        Aperture correction to multiply
        the cubes with. If None, read
        from header. If not in header
        and aper_corr=None do nothing.
        Default is 1.0.
    nsigma : float
        If the cubes don't contain
        1 sigma noise (e.g. in the HDR1
        cubes it's 6 sigma) specify it here
    flim_model : str
        the name of the flux limit model 
        to use. If None then use the latest
        (default)
    mask : array (optional)
        a spatial ra, dec mask with the same
        WCS and dimensions as the data (default:
        None)

    cache_sim_interp : bool (optional)
        cache the SimulationInterpolator,
        so if you use another SensitivityCube
        it will use the same model from before
        (hdr2pt1pt1 or later only, only works
         if you don't change flim_model)



    Attributes
    ----------
    sigmas : array
        an array of the noise values
    alpha_func : callable
        returns the Fleming alpha
        for an input wavelength
    wcs : astropy.wcs:WCS
        world coordinate system to convert between ra, dec, lambda
        and pixel
    f50_from_noise : callable
        function that converts the values
        in `sigmas` to flux values at 
        50% completeness

    """
    def __init__(self,
                 sigmas,
                 header,
                 wavelengths,
                 alphas,
                 aper_corr=1.0,
                 nsigma=1.0,
                 flim_model=None,
                 mask=None,
                 cache_sim_interp=True,
                 verbose=False):

        if mask is not None:
            mask = logical_not(mask)
            mask3d = repeat(mask[newaxis, :, :], sigmas.shape[0], axis=0)
            self.sigmas = maskedarray(sigmas / nsigma,
                                      mask=mask3d,
                                      fill_value=999.0)
        else:
            self.sigmas = maskedarray(sigmas / nsigma, fill_value=999.0)

        # collapse the data to create a continuum mask
        self.collapsed_data = filled(self.sigmas, 0).sum(axis=0)

        self.nsigma = nsigma

        # Grab the flux limit model
        self.f50_from_noise, self.sinterp, interp_sigmas \
                                       = return_flux_limit_model(flim_model,
                                                                 cache_sim_interp=cache_sim_interp,
                                                                 verbose=verbose)

        self.sigma_interpolate = None
        if interp_sigmas:
            indicesz = arange(self.sigmas.shape[0])
            indicesy = arange(self.sigmas.shape[1])
            indicesx = arange(self.sigmas.shape[2])

            self.sigma_interpolate = RegularGridInterpolator(
                (indicesz, indicesy, indicesx),
                self.sigmas.filled(fill_value=nan),
                fill_value=999)

        # Fix issue with header
        if not "CD3_3" in header:
            header["CD3_3"] = header["CDELT3"]
            header["CD3_1"] = 0.0
            header["CD3_2"] = 0.0
            header["CD2_3"] = 0.0
            header["CD1_3"] = 0.0

        self.wcs = WCS(header)
        self.header = header

        # Deal with aperture corrections
        if aper_corr:
            self.aper_corr = aper_corr
        elif "APCOR" in self.header:
            self.aper_corr = self.header["APCOR"]
        elif "APCOR0" in self.header:
            self.aper_corr = self.header["APCOR0"]
        else:
            self.aper_corr = 1.0

        self.sigmas = self.sigmas * self.aper_corr

        self.alphas = array(alphas)
        self.wavelengths = wavelengths

        # Depends if alphas depend on wavelength or
        # is specified per cube cell
        if len(self.alphas.shape) == 3:
            self.alpha_is_cube = True
        else:
            self.alpha_is_cube = False
            self.alpha_func = interp1d(wavelengths,
                                       alphas,
                                       fill_value="extrapolate")

    def get_alpha(self, ra, dec, lambda_):
        """
        Return the parameter controlling
        the slope of the Fleming+ (1995) function
        (only used for the old flux limit models)

        """

        # If alpha is just an array versus wavelength
        # return the value here
        if not self.alpha_is_cube:
            return self.alpha_func(lambda_)

        # Alpha stored in a cube
        ix, iy, iz = self.radecwltoxyz(ra, dec, lambda_)

        # Check for stuff outside of cube
        bad_vals = (ix >= self.alphas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.alphas.shape[1]) | (iy < 0)
        bad_vals = bad_vals | (iz >= self.alphas.shape[0]) | (iz < 0)

        ix[(ix >= self.alphas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.alphas.shape[1]) | (iy < 0)] = 0
        iz[(iz >= self.alphas.shape[0]) | (iz < 0)] = 0

        alphas_here = self.alphas[iz, iy, ix]

        # Support arrays and floats
        try:
            alphas_here[bad_vals] = 999.0
        except TypeError:
            if bad_vals:
                alphas_here = 999.0

        return alphas_here

    @classmethod
    def from_file(cls,
                  fn_sensitivity_cube,
                  wavelengths,
                  alphas,
                  datascale=1e-17,
                  **kwargs):
        """
        Read in a sensitivity cube
        from a file
        
        Parameters
        ----------
        fn_sensitivity_cube : str
            the file name of a cube
            containing the limiting
            magnitude   
        wavelengths, alphas : array
            arrays of the wavelength in
            Angstrom and the alpha parameter
            of the Fleming+ 1995 function
        datascale : float (optional)
            the values stored are 
            this_value/flim
        **kwargs :
            these are passed to the SensitivityCube init
        """

        sigmas, header = read_cube(fn_sensitivity_cube, datascale=datascale)

        return SensitivityCube(sigmas, header, wavelengths, alphas, **kwargs)

    def apply_flux_recalibration(self,
                                 rescale,
                                 flux_calib_correction_file=None):
        """
        Apply a recalibration of the fluxes to the 
        cube

        Parameters
        ----------
        rescale : float 
           value to multiply the flux limit cubes
           to rescale

        flux_calib_correction_file : str (optional)
           filename containing a polynomial
           fit (HETDEX - TRUTH)/HETDEX versus
           wavelength to correct for 
           problems with the flux
           calibration. Should be a polynomial
           centered on 4600, i.e. input to
           polyval(pvals, wl - 4600.0)
        """

        if flux_calib_correction_file:
            pvals = loadtxt(flux_calib_correction_file)

        for iz in range(self.sigmas.shape[0]):
            ra, dec, wl = self.wcs.wcs_pix2world(0, 0, iz, 0)

            if wl < 3850.0:
                wl = 3850.0

            if flux_calib_correction_file:
                self.sigmas[iz, :, :] = rescale * self.sigmas[iz, :, :] * (
                    1.0 - polyval(pvals, wl - 4600.0))
            else:
                self.sigmas[iz, :, :] = rescale * self.sigmas[iz, :, :]

    def radecwltoxyz(self, ra, dec, lambda_, round_=True):
        """
        Convert ra, dec, wavelength position to
        x,y, z coordinate of cube

        Parameters
        ----------
        ra, dec : arrays
            right ascension &
            declination of source
        lambda_ : array
            wavelength in Angstrom
        round_ : bool
            if true, round to nearest
            integer (default is True)

        Returns
        -------
        ix,iy,iz : arrays of int
            indices of arrays for datacube
        """

        lambda_ = array(lambda_)
        ix, iy, iz = self.wcs.wcs_world2pix(ra, dec, lambda_, 0)

        if round_:
            return array(around(ix), dtype=int), array(around(iy), dtype=int), \
                array(around(iz), dtype=int)
        else:
            return array(ix), array(iy), array(iz)

    def get_average_f50(self, ra, dec, lambda_, sncut, npix=1):
        """
        Get the average 50% completeness flux from the cube in
        an npix box around an ra, dec, lambda position

        Parameters
        ----------
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstroms
        sncut : float
            cut in detection significance 
            that defines this catalogue
        npix : int
            the box will be 2*npix + 1 on
            a side, i.e. number of pixels
            around the position to 
            consider.
 
        Returns
        -------
        f50s : array
            average flux limits in the cubes. If outside
            of the cube return 999
        """

        ixc, iyc, izc = self.radecwltoxyz(ra, dec, lambda_)

        na = int(2 * npix + 1)

        # [x1-1, x1, x1+1, x2-1, x2, x2+1, .....]
        offsets = arange(-1.0 * npix, npix + 1, 1, dtype=int)
        ix = ixc.repeat(na) + tile(offsets, len(ixc))

        # same x for all x, y in loop
        ix = ix.repeat(na * na)

        iy = iyc.repeat(na) + tile(offsets, len(iyc))

        # same y for all z values in loop
        iy = iy.repeat(na)

        # tile full y-loop for each x-value
        iy = tile(iy.reshape(len(iyc), na * na), na)
        iy = iy.flatten()

        # [z1-1, z1, z1+1, z2-1, z2, z2+1, .....]
        iz = izc.repeat(len(offsets)) + tile(offsets, len(izc))

        # z axis fastest repeating, tile z loop for every x and y value
        iz = tile(iz.reshape(len(izc), na), na * na)
        iz = iz.flatten()

        # Check for stuff outside of cube
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)
        bad_vals = bad_vals | (iz >= self.sigmas.shape[0]) | (iz < 0)

        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0
        iz[(iz >= self.sigmas.shape[0]) | (iz < 0)] = 0

        f50s = self.f50_from_noise(self.sigmas.filled()[iz, iy, ix], lambda_,
                                   sncut)

        # Support arrays and floats
        f50s[bad_vals] = 999.0

        #print(ix)
        #print(iy)
        #print(iz)

        # return the RMS-averaged flim in the area
        f50s = f50s * f50s
        return sqrt(f50s.reshape(len(ra), na * na * na).mean(axis=1))

    def get_collapsed_value(self, ra, dec):

        ix, iy, iz = self.radecwltoxyz(ra, dec, 4500., round_=True)

        # Check for stuff outside of cube
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)

        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0

        # XXX not using interpolation
        noise = self.collapsed_data[iy, ix]
        noise[bad_vals] = 999.0

        return noise

    def get_local_max_f50(self, ra, dec, lambda_, sncut, npix=1):
        """
        Get the maximum 50% completeness flux from the cube in
        an npix box around and ra, dec, lambda

        Parameters
        ----------
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstroms
        sncut : float
            cut in detection significance 
            that defines this catalogue
        npix : int
            the box will be 2*npix + 1 on
            a side, i.e. number of pixels
            around the position to 
            consider.
 
        Returns
        -------
        f50s : array
            max flux limits in cubes. If outside
            of cube return 999
        """

        ixc, iyc, izc = self.radecwltoxyz(ra, dec, lambda_)

        na = int(2 * npix + 1)

        # [x1-1, x1, x1+1, x2-1, x2, x2+1, .....]
        offsets = arange(-1.0 * npix, npix + 1, 1, dtype=int)
        ix = ixc.repeat(na) + tile(offsets, len(ixc))

        # same x for all x, y in loop
        ix = ix.repeat(na * na)

        iy = iyc.repeat(na) + tile(offsets, len(iyc))

        # same y for all z values in loop
        iy = iy.repeat(na)

        # tile full y-loop for each x-value
        iy = tile(iy.reshape(len(iyc), na * na), na)
        iy = iy.flatten()

        # [z1-1, z1, z1+1, z2-1, z2, z2+1, .....]
        iz = izc.repeat(len(offsets)) + tile(offsets, len(izc))

        # z axis fastest repeating, tile z loop for every x and y value
        iz = tile(iz.reshape(len(izc), na), na * na)
        iz = iz.flatten()

        # Check for stuff outside of cube
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)
        bad_vals = bad_vals | (iz >= self.sigmas.shape[0]) | (iz < 0)

        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0
        iz[(iz >= self.sigmas.shape[0]) | (iz < 0)] = 0

        f50s = self.f50_from_noise(self.sigmas.filled()[iz, iy, ix], lambda_,
                                   sncut)

        # Support arrays and floats
        f50s[bad_vals] = 999.0

        #print(ix)
        #print(iy)
        #print(iz)

        # return the max value in area
        return f50s.reshape(len(ra), na * na * na).max(axis=1)

    def get_f50(self, ra, dec, lambda_, sncut):
        """
        Get 50% completeness flux from the cube at
        ra, dec, lambda

        Parameters
        ----------
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstroms
        sncut : float
            cut in detection significance 
            that defines this catalogue

        Returns
        -------
        f50s : array
            flux limits. If outside
            of cube return 999

        """

        if self.sigma_interpolate:
            round_ = False
        else:
            round_ = True

        ix, iy, iz = self.radecwltoxyz(ra, dec, lambda_, round_=round_)

        # Check for stuff outside of cube
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)
        bad_vals = bad_vals | (iz >= self.sigmas.shape[0]) | (iz < 0)

        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0
        iz[(iz >= self.sigmas.shape[0]) | (iz < 0)] = 0

        if self.sigma_interpolate:
            coords = dstack((iz, iy, ix))[0]
            noise = self.sigma_interpolate(coords)
        else:
            noise = self.sigmas.filled()[iz, iy, ix]

        f50s = self.f50_from_noise(noise, lambda_, sncut)

        # Support arrays and floats
        try:
            f50s[bad_vals] = 999.0
        except TypeError:
            if bad_vals:
                f50s = 999.0

        return f50s

    def compute_snr(self, flux, ra, dec, lambda_):
        """
        Compute the flux divided by the noise for 
        a given source. 

        Parameters
        ----------
        flux : array
            fluxes of objects
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstrom

        Return
        ------
        snr : array
            signal divided by noise


        """
        ix, iy, iz = self.radecwltoxyz(ra, dec, lambda_, round_=True)

        # Check for stuff outside of cube
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)
        bad_vals = bad_vals | (iz >= self.sigmas.shape[0]) | (iz < 0)

        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0
        iz[(iz >= self.sigmas.shape[0]) | (iz < 0)] = 0

        # look up the noise at the object positions
        noise = self.sigmas.filled()[iz, iy, ix]
        snr = flux / noise

        # Support arrays and floats
        try:
            snr[bad_vals] = 0.0
        except TypeError:
            if bad_vals:
                snr = 0.0

        return snr

    def return_completeness(self, flux, ra, dec, lambda_, sncut):
        """
        Return completeness at a 3D position as an array. 
        If for whatever reason the completeness is NaN, it's
        replaced by 0.0. 

        Parameters
        ----------
        flux : array
            fluxes of objects
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstrom
        sncut : float
            the detection significance (S/N) cut
            applied to the data

        Return
        ------
        fracdet : array
            fraction detected 

        Raises
        ------
        WavelengthException :
            Annoys user if they pass
            wavelength outside of
            VIRUS range
        """

        try:
            if lambda_[0] < 3000.0 or lambda_[0] > 6000.0:
                raise WavelengthException("""Odd wavelength value. Are you
                                             sure it's in Angstrom?""")
        except TypeError:
            if lambda_ < 3000.0 or lambda_ > 6000.0:
                raise WavelengthException("""Odd wavelength value. Are you
                                             sure it's in Angstrom?""")

        f50s = self.get_f50(ra, dec, lambda_, sncut)

        if self.sinterp:
            # interpolate over the simulation
            fracdet = self.sinterp(flux, f50s, lambda_, sncut)
        else:
            alphas = self.get_alpha(ra, dec, lambda_)
            fracdet = fleming_function(flux, f50s, alphas)

        try:
            fracdet[isnan(fracdet)] = 0.0
        except TypeError:
            if isnan(fracdet):
                fracdet = 0.0

        return fracdet

    def return_wlslice_completeness(self,
                                    flux,
                                    lambda_low,
                                    lambda_high,
                                    sncut,
                                    noise_cut=1e-15,
                                    pixlo=9,
                                    pixhi=22,
                                    return_vals=False):
        """
        Return completeness of a wavelength slice. NaN completeness
        values are replaced with zeroes, noise values greater than
        noise cut or NaN noise values are simply excluded from the 
        mean 

        Parameters
        ----------
        flux : array
            fluxes of objects
        lambda_low, lambda_high : float
            wavelength slice in Angstrom
            (includes these slices)
        sncut : float
            the detection significance (S/N) cut
            applied to the data
        noise_cut : float
            remove areas with more noise
            than this. Default: 1e-15 erg/s/cm2
        return_vals : bool (optional)
            if True also return an array
            of the noise values
 
        Return
        ------
        fracdet : array
            fraction detected in this slice

        """

        if lambda_low < 3000.0 or lambda_low > 6000.0:
            raise WavelengthException("""Odd wavelength value. Are you
                                         sure it's in Angstrom?""")

        ix, iy, izlo = self.radecwltoxyz(self.wcs.wcs.crval[0],
                                         self.wcs.wcs.crval[1], lambda_low)
        ix, iy, izhigh = self.radecwltoxyz(self.wcs.wcs.crval[0],
                                           self.wcs.wcs.crval[1], lambda_high)

        if izlo < 0:
            print("Warning! Lower wavelength below range")
            izlo = 0

        if izhigh > self.sigmas.shape[0] - 1:
            print("Warning! Upper wavelength above range")
            izhigh = self.sigmas.shape[0] - 1

        izlo = int(izlo)
        izhigh = int(izhigh)

        # remove pixel border and select wavelength slice
        noise = self.sigmas.filled()[izlo:(izhigh + 1), pixlo:pixhi,
                                     pixlo:pixhi]

        # Test what happens with fixed noise
        #noise = noise*0 + normal(loc=1e-17, scale=2e-18,
        #                         size=noise.shape[0]*noise.shape[1]).reshape(noise.shape[0], noise.shape[1])

        # create a cube of the wavelengths
        r, d, wl_1d = self.wcs.wcs_pix2world(ones(1 + izhigh - izlo),
                                             ones(1 + izhigh - izlo),
                                             range(izlo, izhigh + 1), 0)
        waves = wl_1d.repeat(noise.shape[1] * noise.shape[2])

        try:
            waves = waves.reshape(noise.shape)
        except ValueError:
            # reshape failed: print diagnostics, then re-raise rather
            # than continuing with mismatched arrays
            print(noise.shape)
            print(len(wl_1d))
            print(izlo, izhigh)
            raise

        # remove masked data and bad data
        sel = (noise < noise_cut) & isfinite(noise)
        noise = noise[sel]
        waves = waves[sel]

        # Test for fixed lambda
        # waves = waves*0 + lambda_low

        if len(noise) == 0:
            if return_vals:
                return [], []
            else:
                return []

        f50s = self.f50_from_noise(noise, waves, sncut)

        if self.sinterp is None:
            if len(self.alphas.shape) > 1:
                alphas = self.alphas[izlo:(izhigh + 1), :, :]
            else:
                # rough approximation to lambda varying across window
                alphas = self.alpha_func(0.5 * (lambda_low + lambda_high))

        compls = []
        for f in flux:
            if self.sinterp:
                compl = self.sinterp(f, f50s.flatten(), waves.flatten(), sncut)
            else:
                compl = fleming_function(f, f50s, alphas)

            compl[isnan(compl)] = 0.0

            # works so long as pixels equal area
            if len(compl) > 0:
                compls.append(mean(compl))
            else:
                compls.append(0.0)

        if return_vals:
            return array(compls), noise.flatten()
        else:
            return array(compls)

    def return_wlslice_f50(self,
                           lambda_low,
                           lambda_high,
                           sncut,
                           noise_cut=1e-16):
        """
        Return flux at 50% completeness of a wavelength slice.  

        Parameters
        ----------
        lambda_low, lambda_high : float
            wavelength slice in Angstrom
            (includes these slices)
        sncut : float
            the detection significance (S/N) cut
            applied to the data
        noise_cut : float (optional)
            remove areas with more noise
            than this. Default: 1e-16 erg/s/cm2
 
        Return
        ------
        f50 : float
            the flux at 50% completeness
            for the given ``sncut`` in this
            wavelength slice

        """

        try:
            if lambda_low < 3000.0 or lambda_low > 6000.0:
                raise WavelengthException("""Odd wavelength value. Are you
                                             sure it's in Angstrom?""")
        except ValueError:
            if any(lambda_low < 3000.0) or any(lambda_low > 6000.0):
                raise WavelengthException("""Odd wavelength value. Are you
                                             sure it's in Angstrom?""")

        ix, iy, izlo = self.radecwltoxyz(self.wcs.wcs.crval[0],
                                         self.wcs.wcs.crval[1], lambda_low)
        ix, iy, izhigh = self.radecwltoxyz(self.wcs.wcs.crval[0],
                                           self.wcs.wcs.crval[1], lambda_high)
        noise = self.sigmas.filled()[izlo:(izhigh + 1), :, :]
        noise = noise[(noise < noise_cut) & (noise > 0)]

        wl_mid = 0.5 * (lambda_low + lambda_high)
        f50 = self.f50_from_noise(median(noise), wl_mid, sncut)

        return f50

    def write(self, filename, datascale=1e-17, **kwargs):
        """
        Write the sensitivity cube to a FITS file. If any 
        aperture correction was applied, this is removed
        such that the saved data file should be identical 
        to the input (within numerical accuracy).

        Parameters
        ----------
        filename : str 
            Filename to write to
        datascale : float
           the scaling to apply to the
           inverse of the cube values 
           (Optional, default 1e-17)
        **kwargs :
            passed to the astropy.io.fits:writeto
            function
        """

        fits.writeto(filename,
                     self.aper_corr * datascale / self.sigmas.data,
                     header=self.header,
                     **kwargs)
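A hedged usage sketch of the class above, via its `from_file` constructor (the file name, wavelengths, alphas, and coordinates are all placeholders; `sncut=5.0` is just an illustrative significance cut):

import numpy as np

wavelengths = [3500.0, 5500.0]  # placeholder calibration wavelengths (Angstrom)
alphas = [-3.5, -3.5]           # placeholder Fleming alpha values

scube = SensitivityCube.from_file('sensitivity_cube.fits',  # placeholder path
                                  wavelengths, alphas)

# 50% completeness flux and detected fraction at one sky position
f50 = scube.get_f50(np.array([150.0]), np.array([2.0]),
                    np.array([4500.0]), sncut=5.0)
frac = scube.return_completeness(np.array([1e-16]), np.array([150.0]),
                                 np.array([2.0]), np.array([4500.0]),
                                 sncut=5.0)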
Example #47
                         maskFits_address,
                         ext_mask=masks,
                         ext_log='_LINELOG',
                         page_hdr=plot_dict)

    # parameter_maps(fitsLog_address, param_images, objFolder, ext_log='_LINELOG',
    #                page_hdr=plot_dict, image_shape=(500,500))

    for param, user_lines in param_images.items():
        fits_file = Path(objFolder) / f'{param}.fits'
        with fits.open(fits_file) as hdul:
            for line in user_lines:
                param_image = hdul[line].data
                param_hdr = hdul[line].header
                fig = plt.figure(figsize=(5, 5))
                ax = fig.add_subplot(projection=WCS(fits.Header(param_hdr)),
                                     slices=('x', 'y'))
                im = ax.imshow(param_image)
                ax.update({
                    'title': f'Galaxy {obj}: {param}-{line}',
                    'xlabel': r'RA',
                    'ylabel': r'DEC'
                })
                # plt.tight_layout()
                plt.show()

    # # ----------------------------------------- Generate the image data
    #
    # # Empty containers for the images
    # image_dict = {}
    # for chemLabel, plotLabel in label_Conver.items():
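The projection plotting used above in a self-contained form; a sketch assuming any 2D FITS image with a celestial header (the file name is a placeholder):

import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS

hdu = fits.open('image.fits')[0]  # placeholder file
ax = plt.subplot(projection=WCS(hdu.header))
ax.imshow(hdu.data, origin='lower')
ax.set_xlabel('RA')
ax.set_ylabel('DEC')
plt.show()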
Example #48
def non_linear_wcs1d_fits(file_name,
                          spectral_axis_unit=None,
                          flux_unit=None,
                          **kwargs):
    """Read wcs from files written by IRAF

    IRAF does not strictly follow the FITS standard, especially for non-linear
    wavelength solutions

    Parameters
    ----------

    file_name : str
        Name of file to load

    spectral_axis_unit : `~astropy.units.Unit`, optional
        Spectral axis unit, default is None in which case will search for it
        in the header under the keyword 'WAT1_001'

    flux_unit : `~astropy.units.Unit`, optional
        Flux units, default is None. If not specified will attempt to read it
        using the keyword 'BUNIT' and if this keyword does not exist it will
        assume 'ADU'.

    Returns
    -------
    `specutils.Spectrum1D`
    """

    logging.info('Loading 1D non-linear fits solution')

    with fits.open(file_name, **kwargs) as hdulist:
        header = hdulist[0].header
        for wcsdim in range(1, header['WCSDIM'] + 1):
            ctypen = header['CTYPE{:d}'.format(wcsdim)]
            if ctypen == 'LINEAR':
                logging.info("linear Solution: Try using "
                             "`format='wcs1d-fits'` instead")
                wcs = WCS(header)
                spectral_axis = _read_linear_iraf_wcs(
                    wcs=wcs, dc_flag=header['DC-FLAG'])
            elif ctypen == 'MULTISPE':
                logging.info("Multi spectral or non-linear solution")
                spectral_axis = _read_non_linear_iraf_wcs(header=header,
                                                          wcsdim=wcsdim)
            else:
                raise NotImplementedError

        if flux_unit is not None:
            data = hdulist[0].data * flux_unit
        elif 'BUNIT' in header:
            data = u.Quantity(hdulist[0].data, unit=header['BUNIT'])
        else:
            logging.info("Flux unit was not provided, neither it was in the"
                         "header. Assuming ADU.")
            data = u.Quantity(hdulist[0].data, unit='adu')

        if spectral_axis_unit is not None:
            spectral_axis *= spectral_axis_unit
        else:
            wat_head = header['WAT1_001']
            wat_dict = dict()
            for pair in wat_head.split(' '):
                wat_dict[pair.split('=')[0]] = pair.split('=')[1]
            if wat_dict['units'] == 'angstroms':
                logging.info("Found spectral axis units to be angstrom")
                spectral_axis *= u.angstrom

        meta = {'header': header}
    return Spectrum1D(flux=data, spectral_axis=spectral_axis, meta=meta)
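A hedged usage sketch (the file name is a placeholder; passing `flux_unit` skips the BUNIT lookup described above):

import astropy.units as u

spectrum = non_linear_wcs1d_fits('iraf_wavecal.fits',  # placeholder path
                                 flux_unit=u.adu)
print(spectrum.spectral_axis.unit, spectrum.flux.unit)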
Example #49
    sigma_p1, peak_max_p1, peak_min_p1 = get_noise(t_p1)
    sigma_p2, peak_max_p2, peak_min_p2 = get_noise(t_p2)
    sigma_p2a1, peak_max_p2a1, peak_min_p2a1 = get_noise(t_p2a1)

    mask_p0 = np.amax([10*sigma_p0, -peak_min_p0*1.5])
    mask_p1 = np.amax([10*sigma_p1, -peak_min_p1*1.5])
    mask_p2 = np.amax([10*sigma_p2, -peak_min_p2*1.5])

    fig = plt.figure(figsize=(8, 6))

    filename = t_p0
    hdu = fits.open(filename)
    data = hdu[0].data[0, 0]
    centre_pix_y = int(data.shape[0]/2)
    centre_pix_x = int(data.shape[1]/2)
    wcs = WCS(hdu[0].header).dropaxis(3).dropaxis(2)
    cutout = Cutout2D(hdu[0].data[0, 0], position=(
        centre_pix_x, centre_pix_y), size=(400, 400), wcs=wcs)

    ax = fig.add_subplot(2, 2, 1)
    plt.imshow(cutout.data, vmin=-10.0*sigma_p2, vmax=30.0 *
               sigma_p2, origin='lower', cmap='cubehelix')
    plt.contour(cutout.data, levels=[mask_p0])
    plt.xticks([])
    plt.yticks([])
    ax.set_title('mask level %s sigma \n rms %s' %
                 (int(mask_p0/sigma_p0), sigma_p0))
    hdu.close()

    filename = t_p1
    hdu = fits.open(filename)
Example #50
class GaussPyDecompose(object):
    """Decompose spectra with GaussPy+.

    Attributes
    ----------
    path_to_pickle_file : str
        Filepath to the pickled dictionary produced by GaussPyPrepare.
    dirpath_gpy : str
        Directory in which all files produced by GaussPy+ are saved.

    two_phase_decomposition : bool
        'True' (default) uses two smoothing parameters (alpha1, alpha2) for the decomposition. 'False' uses only the alpha1 smoothing parameter.
    save_initial_guesses : bool
        Default is 'False'. Set to 'True' if initial GaussPy fitting guesses should be saved.
    alpha1 : float
        First smoothing parameter.
    alpha2 : float
        Second smoothing parameter. Only used if two_phase_decomposition is set to 'True'
    snr_thresh : float
        S/N threshold used for the original spectrum.
    snr2_thresh : float
        S/N threshold used for the second derivative of the smoothed spectrum.

    use_ncpus : int
        Number of CPUs used in the decomposition. By default 75% of all CPUs on the machine are used.
    fitting : dict
        Dictionary of fitting settings assembled in `decomposition_settings`.
    separation_factor : float
        The required minimum separation between two Gaussian components (mean1, fwhm1) and (mean2, fwhm2) is determined as separation_factor * min(fwhm1, fwhm2).
    main_beam_efficiency : float
        Default is 'None'. Specify if intensity values should be corrected by the main beam efficiency.
    vel_unit : astropy.units
        Default is 'u.km/u.s'. Unit to which velocity values will be converted.
    testing : bool
        Default is 'False'. Set to 'True' if in testing mode.
    verbose : bool
        Default is 'True'. Set to 'False' if descriptive statements should not be printed in the terminal.
    suffix : str
        Suffix for filename of the decomposition results.
    log_output : bool
        Default is 'True'. Set to 'False' if terminal output should not be logged.

    """
    def __init__(self, path_to_pickle_file=None, config_file=''):
        self.path_to_pickle_file = path_to_pickle_file
        self.dirpath_gpy = None

        # self.gausspy_decomposition = True
        self.two_phase_decomposition = True
        self.save_initial_guesses = False
        self.alpha1 = None
        self.alpha2 = None
        self.snr_thresh = None
        self.snr2_thresh = None

        self.improve_fitting = True
        self.exclude_means_outside_channel_range = True
        self.min_fwhm = 1.
        self.max_fwhm = None
        self.snr = 3.
        self.snr_fit = None
        self.significance = 5.
        self.snr_negative = None
        self.rchi2_limit = None
        self.max_amp_factor = 1.1
        self.refit_neg_res_peak = True
        self.refit_broad = True
        self.refit_blended = True
        self.separation_factor = 0.8493218
        self.fwhm_factor = 2.
        self.min_pvalue = 0.01
        self.max_ncomps = None

        self.main_beam_efficiency = None
        self.vel_unit = u.km / u.s
        self.testing = False
        self.verbose = True
        self.suffix = ''
        self.log_output = True
        self.use_ncpus = None

        self.single_prepared_spectrum = None

        if config_file:
            get_values_from_config_file(self,
                                        config_file,
                                        config_key='decomposition')

    def getting_ready(self):
        string = 'GaussPy decomposition'
        banner = len(string) * '='
        heading = '\n' + banner + '\n' + string + '\n' + banner
        say(heading, logger=self.logger)

    def initialize_data(self):
        self.logger = False
        if self.log_output:
            self.logger = set_up_logger(self.dirpath_gpy,
                                        self.filename,
                                        method='g+_decomposition')

        say("\npickle load '{}'...".format(self.file), logger=self.logger)

        with open(self.path_to_pickle_file, "rb") as pickle_file:
            self.pickled_data = pickle.load(pickle_file, encoding='latin1')

        if 'header' in self.pickled_data.keys():
            self.header = correct_header(self.pickled_data['header'])
            self.wcs = WCS(self.header)
            self.velocity_increment = (self.wcs.wcs.cdelt[2] *
                                       self.wcs.wcs.cunit[2]).to(
                                           self.vel_unit).value
        if 'location' in self.pickled_data.keys():
            self.location = self.pickled_data['location']
        if 'nan_mask' in self.pickled_data.keys():
            self.nan_mask = self.pickled_data['nan_mask']
        if 'testing' in self.pickled_data.keys():
            self.testing = self.pickled_data['testing']
            self.use_ncpus = 1

        self.data = self.pickled_data['data_list']
        self.channels = self.pickled_data['x_values']
        self.errors = self.pickled_data['error']

    def check_settings(self):
        if self.path_to_pickle_file is None:
            raise Exception("Need to specify 'path_to_pickle_file'")

        self.dirname = os.path.dirname(self.path_to_pickle_file)
        self.file = os.path.basename(self.path_to_pickle_file)
        self.filename, self.file_extension = os.path.splitext(self.file)

        if self.dirpath_gpy is None:
            self.dirpath_gpy = os.path.normpath(self.dirname + os.sep +
                                                os.pardir)

        self.decomp_dirname = os.path.join(self.dirpath_gpy, 'gpy_decomposed')
        if not os.path.exists(self.decomp_dirname):
            os.makedirs(self.decomp_dirname)

        if self.main_beam_efficiency is None:
            warnings.warn(
                'assuming intensities are already corrected for main beam efficiency'
            )

        warnings.warn("converting velocity values to {}".format(self.vel_unit))

    def decompose(self):
        if self.single_prepared_spectrum:
            self.logger = False
            self.testing = True
            self.use_ncpus = 1
            self.log_output = False
            self.getting_ready()
            return self.start_decomposition()
        else:
            self.check_settings()
            self.initialize_data()
            self.getting_ready()
            self.start_decomposition()
            if 'batchdecomp_temp.pickle' in os.listdir(os.getcwd()):
                os.remove('batchdecomp_temp.pickle')

    def decomposition_settings(self):
        if self.snr_negative is None:
            self.snr_negative = self.snr
        if self.snr_fit is None:
            self.snr_fit = self.snr / 2.
        if self.snr_thresh is None:
            self.snr_thresh = self.snr
        if self.snr2_thresh is None:
            self.snr2_thresh = self.snr

        self.fitting = {
            'improve_fitting': self.improve_fitting,
            'min_fwhm': self.min_fwhm,
            'max_fwhm': self.max_fwhm,
            'snr': self.snr,
            'snr_fit': self.snr_fit,
            'significance': self.significance,
            'snr_negative': self.snr_negative,
            'rchi2_limit': self.rchi2_limit,
            'max_amp_factor': self.max_amp_factor,
            'neg_res_peak': self.refit_neg_res_peak,
            'broad': self.refit_broad,
            'blended': self.refit_blended,
            'fwhm_factor': self.fwhm_factor,
            'separation_factor': self.separation_factor,
            'exclude_means_outside_channel_range':
            self.exclude_means_outside_channel_range,
            'min_pvalue': self.min_pvalue,
            'max_ncomps': self.max_ncomps
        }

        string_gausspy = str('\ndecomposition settings:'
                             '\nGaussPy:'
                             '\nTwo phase decomposition: {a}'
                             '\nalpha1: {b}'
                             '\nalpha2: {c}'
                             '\nSNR1: {d}'
                             '\nSNR2: {e}').format(
                                 a=self.two_phase_decomposition,
                                 b=self.alpha1,
                                 c=self.alpha2,
                                 d=self.snr_thresh,
                                 e=self.snr2_thresh)
        say(string_gausspy, logger=self.logger)

        string_gausspy_plus = ''
        if self.fitting['improve_fitting']:
            for key, value in self.fitting.items():
                string_gausspy_plus += str('\n{}: {}').format(key, value)
        else:
            string_gausspy_plus += str('\nimprove_fitting: {}').format(
                self.fitting['improve_fitting'])
        say(string_gausspy_plus, logger=self.logger)

    def start_decomposition(self):
        if self.alpha1 is None:
            raise Exception("Need to specify 'alpha1' for decomposition.")

        if self.two_phase_decomposition and (self.alpha2 is None):
            raise Exception(
                "Need to specify 'alpha2' for 'two_phase_decomposition'.")

        self.decomposition_settings()
        say('\ndecomposing data...', logger=self.logger)

        from .gausspy_py3 import gp as gp
        g = gp.GaussianDecomposer()  # Load GaussPy
        g.set('use_ncpus', self.use_ncpus)
        g.set('SNR_thresh', self.snr_thresh)
        g.set('SNR2_thresh', self.snr2_thresh)
        g.set('improve_fitting_dict', self.fitting)
        g.set('alpha1', self.alpha1)

        if self.testing:
            g.set('verbose', True)
            g.set('plot', True)

        if self.two_phase_decomposition:
            g.set('phase', 'two')
            g.set('alpha2', self.alpha2)
        else:
            g.set('phase', 'one')

        if self.single_prepared_spectrum:
            return g.batch_decomposition(dct=self.single_prepared_spectrum)

        self.decomposition = g.batch_decomposition(self.path_to_pickle_file)

        self.save_final_results()

        if self.save_initial_guesses:
            # NB: the boolean flag set in __init__ shadows any method of the
            # same name on the instance, so the dump method needs a distinct
            # name
            self.dump_initial_guesses()

    def dump_initial_guesses(self):
        say('\npickle dump GaussPy initial guesses...', logger=self.logger)

        filename = '{}{}_fit_ini.pickle'.format(self.filename, self.suffix)
        pathname = os.path.join(self.decomp_dirname, filename)

        dct_initial_guesses = {}

        for key in [
                "index_initial", "amplitudes_initial", "fwhms_initial",
                "means_initial"
        ]:
            dct_initial_guesses[key] = self.decomposition[key]

        pickle.dump(dct_initial_guesses, open(pathname, 'wb'), protocol=2)
        say("\033[92mSAVED FILE:\033[0m '{}' in '{}'".format(
            filename, self.decomp_dirname),
            logger=self.logger)

    def save_final_results(self):
        say('\npickle dump GaussPy final results...', logger=self.logger)

        dct_gausspy_settings = {
            "two_phase": self.two_phase_decomposition,
            "alpha1": self.alpha1,
            "snr1_thresh": self.snr_thresh,
            "snr2_thresh": self.snr2_thresh
        }

        if self.two_phase_decomposition:
            dct_gausspy_settings["alpha2"] = self.alpha2

        dct_final_guesses = {}

        for key in [
                "index_fit", "best_fit_rchi2", "best_fit_aicc", "pvalue",
                "amplitudes_fit", "amplitudes_fit_err", "fwhms_fit",
                "fwhms_fit_err", "means_fit", "means_fit_err", "log_gplus",
                "N_neg_res_peak", "N_blended", "N_components",
                "quality_control"
        ]:
            dct_final_guesses[key] = self.decomposition[key]

        dct_final_guesses["gausspy_settings"] = dct_gausspy_settings

        dct_final_guesses["improve_fit_settings"] = self.fitting

        filename = '{}{}_fit_fin.pickle'.format(self.filename, self.suffix)
        pathname = os.path.join(self.decomp_dirname, filename)
        pickle.dump(dct_final_guesses, open(pathname, 'wb'), protocol=2)
        say("\033[92mSAVED FILE:\033[0m '{}' in '{}'".format(
            filename, self.decomp_dirname),
            logger=self.logger)

    def load_final_results(self, pathToDecomp):
        self.check_settings()
        self.initialize_data()
        self.getting_ready()

        say('\npickle load final GaussPy results...', logger=self.logger)

        self.decomp_dirname = os.path.dirname(pathToDecomp)
        with open(pathToDecomp, "rb") as pickle_file:
            self.decomposition = pickle.load(pickle_file, encoding='latin1')

        self.file = os.path.basename(pathToDecomp)
        self.filename, self.file_extension = os.path.splitext(self.file)

        if 'header' in self.decomposition.keys():
            self.header = self.decomposition['header']
        if 'channels' in self.decomposition.keys():
            self.channels = self.decomposition['channels']
        if 'nan_mask' in self.pickled_data.keys():
            self.nan_mask = self.pickled_data['nan_mask']
        if 'location' in self.pickled_data.keys():
            self.location = self.pickled_data['location']

    def make_cube(self, mode='full_decomposition'):
        """Create FITS cube of the decomposition results.

        Parameters
        ----------
        mode : str
            'full_decomposition' recreates the whole FITS cube, 'integrated_intensity' creates a cube with the integrated intensity values of the Gaussian components placed at their mean positions, 'main_component' only retains the fitted component with the largest amplitude value
        """
        say('\ncreate {} cube...'.format(mode), logger=self.logger)

        x = self.header['NAXIS1']
        y = self.header['NAXIS2']
        z = self.header['NAXIS3']

        array = np.zeros([z, y, x], dtype=np.float32)
        nSpectra = len(self.decomposition['N_components'])

        for idx in range(nSpectra):
            ncomps = self.decomposition['N_components'][idx]
            if ncomps is None:
                continue

            yi = self.location[idx][0]
            xi = self.location[idx][1]

            amps = self.decomposition['amplitudes_fit'][idx]
            fwhms = self.decomposition['fwhms_fit'][idx]
            means = self.decomposition['means_fit'][idx]

            if self.main_beam_efficiency is not None:
                amps = [amp / self.main_beam_efficiency for amp in amps]

            if mode == 'main_component' and ncomps > 0:
                j = amps.index(max(amps))
                array[:, yi, xi] = gaussian(amps[j], fwhms[j], means[j],
                                            self.channels)
            elif mode == 'integrated_intensity' and ncomps > 0:
                for j in range(ncomps):
                    integrated_intensity = area_of_gaussian(
                        amps[j], fwhms[j] * self.velocity_increment)
                    channel = int(round(means[j]))
                    if self.channels[0] <= channel <= self.channels[-1]:
                        array[channel, yi, xi] += integrated_intensity
            elif mode == 'full_decomposition':
                array[:, yi, xi] = combined_gaussian(amps, fwhms, means,
                                                     self.channels)

            nans = self.nan_mask[:, yi, xi]
            array[:, yi, xi][nans] = np.nan

        if mode == 'main_component':
            comment = 'Fit component with highest amplitude per spectrum.'
            filename = "{}{}_main.fits".format(self.filename, self.suffix)
        elif mode == 'integrated_intensity':
            comment = 'Integrated intensity of fit component at VLSR position.'
            filename = "{}{}_wco.fits".format(self.filename, self.suffix)
        elif mode == 'full_decomposition':
            comment = 'Recreated dataset from fit components.'
            filename = "{}{}_decomp.fits".format(self.filename, self.suffix)

        array[self.nan_mask] = np.nan

        comments = ['GaussPy+ decomposition results:']
        comments.append(comment)
        if self.main_beam_efficiency is not None:
            comments.append('Corrected for main beam efficiency of {}.'.format(
                self.main_beam_efficiency))

        header = update_header(self.header.copy(),
                               comments=comments,
                               write_meta=True)

        pathToFile = os.path.join(self.decomp_dirname, 'FITS', filename)
        save_fits(array, header, pathToFile, verbose=False)
        say("\033[92mSAVED FILE:\033[0m '{}' in '{}'".format(
            filename, os.path.dirname(pathToFile)),
            logger=self.logger)

    def create_input_table(self, ncomps_max=None):
        """Create a table of the decomposition results.

        The table contains the following columns:
        0: Pixel position in X direction
        1: Pixel position in Y direction
        2: Pixel position in Z direction
        3: Amplitude value of the fitted Gaussian component
        4: Root-mean-square noise of the spectrum
        5: Velocity dispersion value of the fitted Gaussian component
        6: Integrated intensity value of the fitted Gaussian component
        7: Coordinate position in X direction
        8: Coordinate position in Y direction
        9: Mean position (VLSR) of the fitted Gaussian component
        10: Error of the amplitude value
        11: Error of the velocity dispersion value
        12: Error of the velocity value
        13: Error of the integrated intensity value

        Amplitude and RMS values are corrected for the main beam efficiency if the main_beam_efficiency parameter was supplied.

        The table is saved in the 'gpy_tables' directory.

        Parameters
        ----------
        ncomps_max : int
            All spectra whose number of fitted components exceeds this value are excluded from the table.
        """
        say('\ncreate input table...', logger=self.logger)

        length = len(self.decomposition['amplitudes_fit'])

        x_pos, y_pos, z_pos, amp, rms, vel_disp, int_tot, x_coord, y_coord,\
            velocity, e_amp, e_vel_disp, e_velocity, e_int_tot = (
                [] for i in range(14))

        for idx in tqdm(range(length)):
            ncomps = self.decomposition['N_components'][idx]

            #  do not continue if spectrum was masked out, was not fitted,
            #  or was fitted by too many components
            if ncomps is None:
                continue
            elif ncomps == 0:
                continue
            elif ncomps_max is not None:
                if ncomps > ncomps_max:
                    continue

            yi, xi = self.location[idx]
            fit_amps = self.decomposition['amplitudes_fit'][idx]
            fit_fwhms = self.decomposition['fwhms_fit'][idx]
            fit_means = self.decomposition['means_fit'][idx]
            fit_e_amps = self.decomposition['amplitudes_fit_err'][idx]
            fit_e_fwhms = self.decomposition['fwhms_fit_err'][idx]
            fit_e_means = self.decomposition['means_fit_err'][idx]
            error = self.errors[idx][0]

            if self.main_beam_efficiency is not None:
                fit_amps = [
                    amp / self.main_beam_efficiency for amp in fit_amps
                ]
                fit_e_amps = [
                    e_amp / self.main_beam_efficiency for e_amp in fit_e_amps
                ]
                error /= self.main_beam_efficiency

            for j in range(ncomps):
                amp_value = fit_amps[j]
                e_amp_value = fit_e_amps[j]
                fwhm_value = fit_fwhms[j] * self.velocity_increment
                e_fwhm_value = fit_e_fwhms[j] * self.velocity_increment
                mean_value = fit_means[j]
                e_mean_value = fit_e_means[j]

                channel = int(round(mean_value))
                if channel < self.channels[0] or channel > self.channels[-1]:
                    continue

                x_wcs, y_wcs, z_wcs = self.wcs.wcs_pix2world(
                    xi, yi, mean_value, 0)

                x_pos.append(xi)
                y_pos.append(yi)
                z_pos.append(channel)
                rms.append(error)

                amp.append(amp_value)
                e_amp.append(e_amp_value)

                velocity.append(
                    (z_wcs * self.wcs.wcs.cunit[2]).to(self.vel_unit).value)
                e_velocity.append(e_mean_value * self.velocity_increment)

                # FWHM -> velocity dispersion (sigma):
                # 2.354820045 = 2 * sqrt(2 * ln(2))
                vel_disp.append(fwhm_value / 2.354820045)
                e_vel_disp.append(e_fwhm_value / 2.354820045)

                integrated_intensity = area_of_gaussian(amp_value, fwhm_value)
                e_integrated_intensity = area_of_gaussian(
                    amp_value + e_amp_value, fwhm_value + e_fwhm_value) -\
                    integrated_intensity
                int_tot.append(integrated_intensity)
                e_int_tot.append(e_integrated_intensity)
                x_coord.append(x_wcs)
                y_coord.append(y_wcs)

        names = [
            'x_pos', 'y_pos', 'z_pos', 'amp', 'rms', 'vel_disp', 'int_tot',
            self.wcs.wcs.lngtyp, self.wcs.wcs.lattyp, 'VLSR', 'e_amp',
            'e_vel_disp', 'e_VLSR', 'e_int_tot'
        ]

        dtype = tuple(3 * ['i4'] + (len(names) - 3) * ['f4'])

        table = Table([
            x_pos, y_pos, z_pos, amp, rms, vel_disp, int_tot, x_coord, y_coord,
            velocity, e_amp, e_vel_disp, e_velocity, e_int_tot
        ],
                      names=names,
                      dtype=dtype)

        for key in names[3:]:
            table[key].format = "{0:.4f}"

        tableDirname = os.path.join(os.path.dirname(self.dirname),
                                    'gpy_tables')
        if not os.path.exists(tableDirname):
            os.makedirs(tableDirname)

        filename = '{}{}_wco.dat'.format(self.filename, self.suffix)
        pathToTable = os.path.join(tableDirname, filename)
        table.write(pathToTable, format='ascii', overwrite=True)
        say("\n\033[92mSAVED FILE:\033[0m '{}' in '{}'".format(
            filename, tableDirname),
            logger=self.logger)

    def produce_component_map(self, dtype='float32'):
        """Create FITS map showing the number of fitted components.

        The FITS file is saved in the 'gpy_maps' directory.
        """
        say("\nmaking component map...", logger=self.logger)
        data = np.empty((self.header['NAXIS2'], self.header['NAXIS1']))
        data.fill(np.nan)

        for (y, x), components in zip(
                self.location, self.decomposition['N_components']):
            if components is not None:
                data[y, x] = components

        comments = ['Number of fitted GaussPy components']
        header = change_header(self.header.copy(),
                               format='pp',
                               comments=comments)

        filename = "{}{}_component_map.fits".format(self.filename, self.suffix)
        pathToFile = os.path.join(os.path.dirname(self.dirname), 'gpy_maps',
                                  filename)

        save_fits(data.astype(dtype), header, pathToFile, verbose=True)

    def produce_rchi2_map(self, dtype='float32'):
        """Create FITS map showing the reduced chi-square values of the decomposition.

        The FITS file is saved in the 'gpy_maps' directory.
        """
        say("\nmaking reduced chi2 map...", logger=self.logger)

        data = np.empty((self.header['NAXIS2'], self.header['NAXIS1']))
        data.fill(np.nan)

        for (y, x), components, rchi2 in zip(
                self.location, self.decomposition['N_components'],
                self.decomposition['best_fit_rchi2']):
            if components is not None:
                if rchi2 is None:
                    data[y, x] = 0.
                else:
                    data[y, x] = rchi2

        comments = ['Reduced chi2 values of GaussPy fits']
        header = change_header(self.header.copy(),
                               format='pp',
                               comments=comments)

        filename = "{}{}_rchi2_map.fits".format(self.filename, self.suffix)
        pathToFile = os.path.join(os.path.dirname(self.dirname), 'gpy_maps',
                                  filename)

        save_fits(data.astype(dtype), header, pathToFile, verbose=True)

    def produce_velocity_dispersion_map(self, mode='average', dtype='float32'):
        """Produce a map of the average or maximum velocity dispersions per spectrum."""
        say("\nmaking map of {} velocity dispersions...".format(mode),
            logger=self.logger)

        data = np.empty((self.header['NAXIS2'], self.header['NAXIS1']))
        data.fill(np.nan)

        # TODO: rewrite this in terms of wcs and CUNIT
        factor_kms = self.header['CDELT3'] / 1e3

        for (y, x), fwhms in zip(
                self.location, self.decomposition['fwhms_fit']):
            if fwhms is not None:
                if len(fwhms) > 0:
                    # FWHM -> sigma conversion: 2.354820045 = 2*sqrt(2*ln(2))
                    if mode == 'average':
                        data[y, x] = np.mean(fwhms) * factor_kms / 2.354820045
                    elif mode == 'maximum':
                        data[y, x] = max(fwhms) * factor_kms / 2.354820045
                else:
                    data[y, x] = 0

        if mode == 'average':
            comments = ['Average velocity dispersion values of GaussPy fits']
        elif mode == 'maximum':
            comments = ['Maximum velocity dispersion values of GaussPy fits']
        header = change_header(self.header.copy(),
                               format='pp',
                               comments=comments)

        filename = "{}{}_{}_veldisp_map.fits".format(self.filename,
                                                     self.suffix, mode)
        pathToFile = os.path.join(os.path.dirname(self.dirname), 'gpy_maps',
                                  filename)

        save_fits(data.astype(dtype), header, pathToFile, verbose=False)
        say(">> saved {} velocity dispersion map '{}' in {}".format(
            mode, filename, os.path.dirname(pathToFile)),
            logger=self.logger)
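A minimal usage sketch for the methods above follows. The enclosing class and its constructor are not part of this excerpt, so the name `Finalize`, the keyword `path_to_pickle_file`, and the file name are assumptions made purely for illustration.

# Hypothetical driver for the methods above; 'Finalize' and its constructor
# signature are assumed, since the class header is not part of this excerpt.
finalize = Finalize(path_to_pickle_file='decomposition_g+.pickle')
finalize.make_cube(mode='full_decomposition')    # recreated data cube
finalize.make_cube(mode='integrated_intensity')  # integrated-intensity cube
finalize.create_input_table(ncomps_max=5)        # per-component table
finalize.produce_component_map()                 # map of N fitted components
finalize.produce_rchi2_map()                     # reduced chi-squared map
finalize.produce_velocity_dispersion_map(mode='average')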
Example #51
0
def test_wcs_based_photometry():
    from astropy.wcs import WCS
    from astropy.wcs.utils import pixel_to_skycoord
    from ...datasets import make_4gaussians_image

    hdu = make_4gaussians_image(hdu=True, wcs=True)
    wcs = WCS(header=hdu.header)

    # hard-wired positions in make_4gaussians_image
    pos_orig_pixel = u.Quantity(([160., 25., 150., 90.], [70., 40., 25., 60.]),
                                unit=u.pixel)

    pos_skycoord = pixel_to_skycoord(pos_orig_pixel[0], pos_orig_pixel[1], wcs)

    pos_skycoord_s = pos_skycoord[2]

    photometry_skycoord_circ = aperture_photometry(
        hdu, SkyCircularAperture(pos_skycoord, 3 * u.deg))
    photometry_skycoord_circ_2 = aperture_photometry(
        hdu, SkyCircularAperture(pos_skycoord, 2 * u.deg))
    photometry_skycoord_circ_s = aperture_photometry(
        hdu, SkyCircularAperture(pos_skycoord_s, 3 * u.deg))

    assert_allclose(photometry_skycoord_circ['aperture_sum'][2],
                    photometry_skycoord_circ_s['aperture_sum'])

    photometry_skycoord_circ_ann = aperture_photometry(
        hdu, SkyCircularAnnulus(pos_skycoord, 2 * u.deg, 3 * u.deg))
    photometry_skycoord_circ_ann_s = aperture_photometry(
        hdu, SkyCircularAnnulus(pos_skycoord_s, 2 * u.deg, 3 * u.deg))

    assert_allclose(photometry_skycoord_circ_ann['aperture_sum'][2],
                    photometry_skycoord_circ_ann_s['aperture_sum'])

    assert_allclose(
        photometry_skycoord_circ_ann['aperture_sum'],
        photometry_skycoord_circ['aperture_sum'] -
        photometry_skycoord_circ_2['aperture_sum'])

    photometry_skycoord_ell = aperture_photometry(
        hdu,
        SkyEllipticalAperture(pos_skycoord, 3 * u.deg, 3.0001 * u.deg,
                              45 * u.deg))
    photometry_skycoord_ell_2 = aperture_photometry(
        hdu,
        SkyEllipticalAperture(pos_skycoord, 2 * u.deg, 2.0001 * u.deg,
                              45 * u.deg))
    photometry_skycoord_ell_s = aperture_photometry(
        hdu,
        SkyEllipticalAperture(pos_skycoord_s, 3 * u.deg, 3.0001 * u.deg,
                              45 * u.deg))
    photometry_skycoord_ell_ann = aperture_photometry(
        hdu,
        SkyEllipticalAnnulus(pos_skycoord, 2 * u.deg, 3 * u.deg,
                             3.0001 * u.deg, 45 * u.deg))
    photometry_skycoord_ell_ann_s = aperture_photometry(
        hdu,
        SkyEllipticalAnnulus(pos_skycoord_s, 2 * u.deg, 3 * u.deg,
                             3.0001 * u.deg, 45 * u.deg))

    assert_allclose(photometry_skycoord_ell['aperture_sum'][2],
                    photometry_skycoord_ell_s['aperture_sum'])

    assert_allclose(photometry_skycoord_ell_ann['aperture_sum'][2],
                    photometry_skycoord_ell_ann_s['aperture_sum'])

    assert_allclose(photometry_skycoord_ell['aperture_sum'],
                    photometry_skycoord_circ['aperture_sum'],
                    rtol=5e-3)

    assert_allclose(photometry_skycoord_ell_ann['aperture_sum'],
                    photometry_skycoord_ell['aperture_sum'] -
                    photometry_skycoord_ell_2['aperture_sum'],
                    rtol=1e-4)

    photometry_skycoord_rec = aperture_photometry(hdu,
                                                  SkyRectangularAperture(
                                                      pos_skycoord, 6 * u.deg,
                                                      6 * u.deg, 0 * u.deg),
                                                  method='subpixel',
                                                  subpixels=20)
    photometry_skycoord_rec_4 = aperture_photometry(
        hdu,
        SkyRectangularAperture(pos_skycoord, 4 * u.deg, 4 * u.deg, 0 * u.deg),
        method='subpixel',
        subpixels=20)
    photometry_skycoord_rec_s = aperture_photometry(
        hdu,
        SkyRectangularAperture(pos_skycoord_s, 6 * u.deg, 6 * u.deg,
                               0 * u.deg),
        method='subpixel',
        subpixels=20)
    photometry_skycoord_rec_ann = aperture_photometry(
        hdu,
        SkyRectangularAnnulus(pos_skycoord, 4 * u.deg, 6 * u.deg, 6 * u.deg,
                              0 * u.deg),
        method='subpixel',
        subpixels=20)
    photometry_skycoord_rec_ann_s = aperture_photometry(
        hdu,
        SkyRectangularAnnulus(pos_skycoord_s, 4 * u.deg, 6 * u.deg, 6 * u.deg,
                              0 * u.deg),
        method='subpixel',
        subpixels=20)

    assert_allclose(photometry_skycoord_rec['aperture_sum'][2],
                    photometry_skycoord_rec_s['aperture_sum'])

    assert np.all(photometry_skycoord_rec['aperture_sum'] >
                  photometry_skycoord_circ['aperture_sum'])

    assert_allclose(photometry_skycoord_rec_ann['aperture_sum'][2],
                    photometry_skycoord_rec_ann_s['aperture_sum'])

    assert_allclose(photometry_skycoord_rec_ann['aperture_sum'],
                    photometry_skycoord_rec['aperture_sum'] -
                    photometry_skycoord_rec_4['aperture_sum'],
                    rtol=1e-4)
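As a standalone illustration of the machinery this test exercises, the sketch below builds a small constant image with a synthetic TAN WCS and checks that a circular aperture gives (nearly) the same sum whether it is specified in pixel or in sky coordinates; it assumes photutils and astropy are installed, and all numeric values are illustrative.

import numpy as np
import astropy.units as u
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord
from photutils.aperture import (CircularAperture, SkyCircularAperture,
                                aperture_photometry)

data = np.ones((100, 100))
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50, 50]
wcs.wcs.cdelt = [-0.01, 0.01]   # 0.01 deg per pixel
wcs.wcs.crval = [30.0, 10.0]

# Same aperture, once in pixel and once in sky coordinates
pix_aper = CircularAperture((40.0, 60.0), r=5.0)
sky_aper = SkyCircularAperture(pixel_to_skycoord(40.0, 60.0, wcs),
                               r=5.0 * 0.01 * u.deg)

print(aperture_photometry(data, pix_aper)['aperture_sum'][0])
print(aperture_photometry(data, sky_aper, wcs=wcs)['aperture_sum'][0])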
Example #52
0
    def to_wcs(self, use_full_header=False, target_image=None):
        """
        Convert AVM projection information into an Astropy WCS object.

        Parameters
        ----------
        use_full_header : bool, optional
            Whether to use the full embedded Header if available. If set to
            `False`, the WCS is determined from the regular AVM keywords.
        target_image : str, optional
            In some cases, the dimensions of the image containing the AVM/WCS
            information are different from the dimensions of the image for which
            the AVM was defined. The `target_image` option can be used to pass
            the path of an image from which the size will be used to re-scale
            the WCS.
        """

        if not astropy_installed:
            raise Exception("Astropy is required to use to_wcs()")

        if repr(self.Spatial) == '':
            raise NoSpatialInformation(
                "AVM meta-data does not contain any spatial information")

        if use_full_header and self.Spatial.FITSheader is not None:
            print("Using full FITS header from Spatial.FITSheader")
            header = fits.Header(txtfile=BytesIO(self.Spatial.FITSheader))
            return WCS(header)

        # Initializing WCS object
        wcs = WCS(naxis=2)

        # Find the coordinate type
        if self.Spatial.CoordinateFrame is not None:
            ctype = self.Spatial.CoordinateFrame
        else:
            warnings.warn("Spatial.CoordinateFrame not found, assuming ICRS")
            ctype = 'ICRS'

        if ctype in ['ICRS', 'FK5', 'FK4']:
            xcoord = "RA--"
            ycoord = "DEC-"
            wcs.wcs.radesys = ctype.encode('ascii')
        elif ctype in ['ECL']:
            xcoord = "ELON"
            ycoord = "ELAT"
        elif ctype in ['GAL']:
            xcoord = "GLON"
            ycoord = "GLAT"
        elif ctype in ['SGAL']:
            xcoord = "SLON"
            ycoord = "SLAT"
        else:
            raise Exception("Unknown coordinate system: %s" % ctype)

        # Find the projection type
        cproj = ('%+4s' % self.Spatial.CoordsystemProjection).replace(' ', '-')

        wcs.wcs.ctype[0] = (xcoord + cproj).encode('ascii')
        wcs.wcs.ctype[1] = (ycoord + cproj).encode('ascii')

        # Find the equinox
        if self.Spatial.Equinox is None:
            warnings.warn("Spatial.Equinox is not present, assuming 2000")
            wcs.wcs.equinox = 2000.
        elif type(self.Spatial.Equinox) is str:
            if self.Spatial.Equinox == "J2000":
                wcs.wcs.equinox = 2000.
            elif self.Spatial.Equinox == "B1950":
                wcs.wcs.equinox = 1950.
            else:
                try:
                    wcs.wcs.equinox = float(self.Spatial.Equinox)
                except ValueError:
                    raise ValueError("Unknown equinox: %s" %
                                     self.Spatial.Equinox)
        else:
            wcs.wcs.equinox = float(self.Spatial.Equinox)

        # Set standard WCS parameters
        if self.Spatial.ReferenceDimension is not None:
            wcs_naxis1, wcs_naxis2 = self.Spatial.ReferenceDimension
            if hasattr(wcs, 'naxis1'):  # PyWCS and Astropy < 0.4
                wcs.naxis1, wcs.naxis2 = wcs_naxis1, wcs_naxis2
        else:
            wcs_naxis1, wcs_naxis2 = None, None

        wcs.wcs.crval = self.Spatial.ReferenceValue
        wcs.wcs.crpix = self.Spatial.ReferencePixel

        if self.Spatial.CDMatrix is not None:
            wcs.wcs.cd = [
                self.Spatial.CDMatrix[0:2], self.Spatial.CDMatrix[2:4]
            ]
        elif self.Spatial.Scale is not None:
            # AVM Standard 1.2:
            #
            # "The scale should follow the standard FITS convention for sky
            # projections in which the first element is negative (indicating
            # increasing RA/longitude to the left) and the second is positive.
            # In practice, only the absolute value of the first term should be
            # necessary to identify the pixel scale since images should always
            # be presented in an undistorted 1:1 aspect ratio as they appear in
            # the sky when viewed from Earth. This field can be populated from
            # the FITS keywords: CDELT1, CDELT2 (or derived from CD matrix)."
            #
            # Therefore, we have to enforce the sign of CDELT:
            wcs.wcs.cdelt[0] = -abs(self.Spatial.Scale[0])
            wcs.wcs.cdelt[1] = +abs(self.Spatial.Scale[1])
            if self.Spatial.Rotation is not None:
                wcs.wcs.crota = self.Spatial.Rotation, self.Spatial.Rotation

        # If `target_image` is set, we have to rescale the reference pixel and
        # the scale
        if target_image is not None:

            # Find target image size
            from PIL import Image
            nx, ny = Image.open(target_image).size

            if self.Spatial.ReferenceDimension is None:
                raise ValueError(
                    "Spatial.ReferenceDimension should be set in order to determine scale in target image"
                )

            # Find scale in x and y
            scale_x = nx / float(wcs_naxis1)
            scale_y = ny / float(wcs_naxis2)

            # Check that scales are consistent
            if abs(scale_x - scale_y) / (scale_x + scale_y) * 2. < 0.01:
                scale = scale_x
            else:
                raise ValueError(
                    "Cannot scale WCS to target image consistently in x and y direction"
                )

            wcs.wcs.cdelt /= scale
            wcs.wcs.crpix *= scale

            if hasattr(wcs, 'naxis1'):  # PyWCS and Astropy < 0.4
                wcs.naxis1 = nx
                wcs.naxis2 = ny

        return wcs
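The core of the AVM-to-WCS mapping above is only a handful of keyword assignments. The sketch below repeats that mapping with made-up Spatial values, including the sign convention enforced on CDELT; it is an illustration under assumed values, not part of the pyavm API.

# Minimal sketch of the AVM -> WCS mapping above, with made-up values.
from astropy.wcs import WCS

scale = (0.002, 0.002)          # like Spatial.Scale [deg/pix]
rotation = 15.0                 # like Spatial.Rotation [deg]

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [83.6, 22.0]    # like Spatial.ReferenceValue
wcs.wcs.crpix = [512., 512.]    # like Spatial.ReferencePixel
# Enforce the FITS sign convention: RA/longitude increases to the left.
wcs.wcs.cdelt = [-abs(scale[0]), +abs(scale[1])]
wcs.wcs.crota = [rotation, rotation]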
Example #53
0
def reindex_wcs(wcs, inds):
    """
    Re-index a WCS given indices.  The number of axes may be reduced.

    Parameters
    ----------
    wcs: astropy.wcs.WCS
        The WCS to be manipulated
    inds: np.array(dtype='int')
        The indices of the array to keep in the output.
        e.g. swapaxes: [0,2,1,3]
        dropaxes: [0,1,3]
    """

    if not isinstance(inds, np.ndarray):
        raise TypeError("Indices must be an ndarray")

    if inds.dtype.kind != 'i':
        raise TypeError('Indices must be integers')

    outwcs = WCS(naxis=len(inds))
    for par in wcs_parameters_to_preserve:
        setattr(outwcs.wcs, par, getattr(wcs.wcs, par))

    cdelt = wcs.wcs.get_cdelt()
    pc = wcs.wcs.get_pc()

    outwcs.wcs.crpix = wcs.wcs.crpix[inds]
    outwcs.wcs.cdelt = cdelt[inds]
    outwcs.wcs.crval = wcs.wcs.crval[inds]
    outwcs.wcs.cunit = [wcs.wcs.cunit[i] for i in inds]
    outwcs.wcs.ctype = [wcs.wcs.ctype[i] for i in inds]
    outwcs.wcs.cname = [wcs.wcs.cname[i] for i in inds]
    outwcs.wcs.pc = pc[inds[:, None], inds[None, :]]


    matched_projections = [prj for prj in wcs_projections if any(prj in x for x in outwcs.wcs.ctype)]
    matchproj_count = [sum(prj in x for x in outwcs.wcs.ctype) for prj in matched_projections]
    if any(n == 1 for n in matchproj_count):
        # unmatched celestial axes = there is only one of them
        for prj in matched_projections:
            match = [prj in ct for ct in outwcs.wcs.ctype].index(True)
            outwcs.wcs.ctype[match] = outwcs.wcs.ctype[match].split("-")[0]
            warnings.warn("Slicing across a celestial axis results "
                          "in an invalid WCS, so the celestial "
                          "projection ({0}) is being removed.  "
                          "The WCS indices being kept were {1}."
                          .format(prj, inds),
                          WCSWarning)

    pv_cards = []
    for i, j in enumerate(inds):
        for k, m, v in wcs.wcs.get_pv():
            if k == j:
                pv_cards.append((i, m, v))
    outwcs.wcs.set_pv(pv_cards)

    ps_cards = []
    for i, j in enumerate(inds):
        for k, m, v in wcs.wcs.get_ps():
            if k == j:
                ps_cards.append((i, m, v))
    outwcs.wcs.set_ps(ps_cards)


    outwcs.wcs.set()

    return outwcs
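Note that reindex_wcs relies on module-level helpers (wcs_parameters_to_preserve, wcs_projections, WCSWarning) that are not shown here. For plain reordering or dropping of axes, astropy's built-in WCS.sub achieves the same effect; the sketch below shows the correspondence (WCS.sub uses 1-based axis numbers).

# Built-in alternative for simple cases of reindex_wcs, using WCS.sub.
from astropy.wcs import WCS

wcs = WCS(naxis=3)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']

swapped = wcs.sub([3, 1, 2])   # like reindex_wcs(wcs, np.array([2, 0, 1]))
dropped = wcs.sub([1, 2])      # like reindex_wcs(wcs, np.array([0, 1]))
print(list(swapped.wcs.ctype), list(dropped.wcs.ctype))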
Example #54
0
class CASjobs_sources(object):
    """
    Query an entire image region for sources, on casjobs. Takes the image WCS to calculate
    image bounds and search area.

    Inputs
    ------
        info : wcs/str/header
            the wcs, file name, or header for the image to query the region of.
    Options
    -------
        maglim : float
            magnitude limit of the query 
        band : str
            ps1 band to do the magnitude cut, doesn't matter for gaia
        context : str
            casjobs query context, currently only ps1 and gaia are avaialble 
        name : str
            name of the database that will be used to save on casjobs and locally 
        path : str
            path to the save directory 
    """
    def __init__(self,
                 info=None,
                 ra=None,
                 dec=None,
                 rad=None,
                 maglim=20,
                 band='i',
                 context='ps1',
                 name=None,
                 path='./'):
        if info is not None:
            if type(info) == str:
                self.wcs = WCS(info)
            elif type(info) == wcs_class:
                self.wcs = info
            elif type(info) == header_class:
                self.wcs = WCS(info)
        elif (ra is None) | (dec is None) | (rad is None):
            raise ValueError(
                'if no wcs info is provided then ra, dec and rad MUST be provided.'
            )
        self.ra = ra
        self.dec = dec
        self.rad = rad
        self.context = context
        self.name = name
        self.maglim = maglim
        self.band = band
        self.table = None
        self.query = None
        self.path = path

    def get_coords(self):
        """
        Get the centre coordinates and query radius from the wcs 
        """
        if (self.ra is None) | (self.dec is None) | (self.rad is None):
            dim1, dim2 = self.wcs.array_shape
            centre = [dim1 // 2, dim2 // 2]
            self.ra, self.dec = self.wcs.all_pix2world(centre[0], centre[1], 0)

            size = np.sqrt((dim1 - centre[0])**2 + (dim2 - centre[1])**2) + 50
            pix_size = np.max(abs(self.wcs.pixel_scale_matrix))
            self.rad = size * pix_size * 60  # size in arc minutes
        return

    def _check_params(self):
        """
        Check all relevant variables are defined
        """
        m = ''
        message = "\n {} not defined"
        if self.name is None:
            print(message.format('name') + ' assigning default name.')
            self.name = 'default'
        if self.ra is None:
            m += message.format('ra')
        if self.dec is None:
            m += message.format('dec')
        if self.rad is None:
            m += message.format('rad')
        if len(m) > 2:
            raise ValueError(m)
        return

    def get_query(self):
        """
        Get the query string to submit to casjobs
        """

        self._check_params()

        if self.context.lower() == 'ps1':
            ps1_query = """
                        select 
                        o.raMean, o.decMean,o.gMeanPSFMag,o.rMeanPSFMag,
                        o.iMeanPSFMag,o.zMeanPSFMag,o.yMeanPSFMag,o.iMeanKronMag
                        into mydb.[{dbname}]
                        from fGetNearbyObjEq({ra},{dec},{rad}) x
                        JOIN MeanObjectView o on o.ObjID=x.ObjId
                        LEFT JOIN StackObjectAttributes AS soa ON soa.objID = x.objID
                        WHERE o.nDetections > 5
                        AND soa.primaryDetection > 0
                        AND o.{band}MeanPSFMag < {maglim}
                        """
            self.name = 'ps1_' + self.name
            self.query = ps1_query.format(dbname=self.name,
                                          ra=self.ra,
                                          dec=self.dec,
                                          rad=self.rad,
                                          band=self.band,
                                          maglim=self.maglim)
        if self.context.lower() == 'gaia':
            gaia_query = """
                         select T.ra,T.dec,T.phot_g_mean_mag as gaia
                         into mydb.[{dbname}]
                         from fGetNearbyObjEq({ra},{dec},{rad}) as n
                         join GAIApublicVOview T on T.source_id = n.objID
                         WHERE T.phot_g_mean_mag < {maglim}
                         """
            self.name = 'gaia_' + self.name
            self.query = gaia_query.format(dbname=self.name,
                                           ra=self.ra,
                                           dec=self.dec,
                                           rad=self.rad,
                                           band=self.band,
                                           maglim=self.maglim)
        return

    def submit_query(self, reset=True):
        """
        Submit the query and download the resulting table
        """
        if self.context == 'ps1':
            c = 'PanSTARRS_DR2'

        elif self.context == 'gaia':
            c = 'GAIA_DR2'

        else:
            raise ValueError('Only gaia and ps1 available now.')

        jobs = mastcasjobs.MastCasJobs(context=c)
        if reset:
            jobs.drop_table_if_exists(self.name)
        else:
            try:
                self.table = jobs.get_table(self.name,
                                            format='CSV').to_pandas()
                if self.context == 'ps1':
                    self.table = self.table.replace(-999, np.nan)
                print('loading existing table')
                return
            except Exception:
                # Table does not exist yet, so fall through and (re)create it
                pass

        job_id = jobs.submit(self.query)
        status = jobs.monitor(job_id)
        print(status)
        if status[0] != 5:
            raise ValueError('No table created')
        self.table = jobs.get_table(self.name, format='CSV').to_pandas()

        if self.context == 'ps1':
            self.table = self.table.replace(-999, np.nan)

        return

    def save_space(self):
        """
        Creates a path if it doesn't already exist.
        """
        try:
            if not os.path.exists(self.path):
                os.makedirs(self.path)
        except FileExistsError:
            pass
        return

    def save_table(self, save):
        """
        Save the query output 
        """
        self.save_space()
        self.table.to_csv(save + '.csv', index=False)

    def get_table(self, reset=False, save=None):
        """
        Runs all functions to get the table.
        """
        if save is not None:
            self.name = save
        self.get_coords()
        self.get_query()
        self.submit_query(reset=reset)
        if save is not None:
            self.save_table(self.name)
        return
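Of the steps above, only get_coords is independent of CasJobs credentials. The sketch below reproduces that computation on a toy WCS so the centre/radius logic can be checked offline; all numeric values are illustrative.

# Standalone sketch of the get_coords computation above, on a toy WCS.
import numpy as np
from astropy.wcs import WCS

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [100, 100]
wcs.wcs.cdelt = [-0.0003, 0.0003]   # roughly 1 arcsec pixels
wcs.wcs.crval = [150.0, 2.0]
wcs.array_shape = (200, 200)

dim1, dim2 = wcs.array_shape
centre = [dim1 // 2, dim2 // 2]
ra, dec = wcs.all_pix2world(centre[0], centre[1], 0)
size = np.sqrt((dim1 - centre[0])**2 + (dim2 - centre[1])**2) + 50
pix_size = np.max(abs(wcs.pixel_scale_matrix))
rad = size * pix_size * 60   # search radius in arcminutes
print(float(ra), float(dec), rad)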
Example #55
0
def spectral_cube_wcs(request):
    # A simple spectral cube WCS used by some tests
    wcs = WCS(naxis=3)
    wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'
    wcs.wcs.set()
    return wcs
Example #56
0
# NOTE: the beginning of this snippet was truncated; the lines below are a
# plausible reconstruction of the target-header definition (the NAXIS1/
# NAXIS2/CRPIX1 values are assumptions chosen to mirror CRPIX2 and CDELT2).
from astropy.io import fits
from reproject import reproject_from_healpix

target_header = fits.Header.fromstring("""
NAXIS   =                    2
NAXIS1  =                  800
NAXIS2  =                  800
CTYPE1  = 'RA---MOL'
CRPIX1  =                  400
CRVAL1  =                180.0
CDELT1  =                 -0.4
CUNIT1  = 'deg     '
CTYPE2  = 'DEC--MOL'
CRPIX2  =                  400
CRVAL2  =                  0.0
CDELT2  =                  0.4
CUNIT2  = 'deg     '
COORDSYS= 'icrs    '
""",
                                       sep='\n')
# 'filename_ligo' is assumed to point to a HEALPix FITS skymap on disk
array, footprint = reproject_from_healpix(filename_ligo, target_header)
from astropy.wcs import WCS
import matplotlib.pyplot as plt

ax = plt.subplot(1, 1, 1, projection=WCS(target_header))
#ax.imshow(array, vmin=0, vmax=1.e-8)
#ax.coords.grid(color='white')
ax.coords.frame.set_color('none')
import numpy as np

np.random.seed(19680801)

x = 30 * np.random.randn(10000)
mu = x.mean()
median = np.median(x)
sigma = x.std()
textstr = '\n'.join(
    (r'$\mu=%.2f$' % (mu, ), r'$\mathrm{median}=%.2f$' % (median, ),
     r'$\sigma=%.2f$' % (sigma, )))
Example #57
0
# NOTE: the opening of this snippet was truncated; the imports and the first
# header cards below are reconstructed. CTYPE1/CTYPE2 are assumptions chosen
# to be consistent with the CUNIT values, following the spectral-cube header
# used in astropy's wcsapi tests; the sanitize_slices import location is an
# assumption as well.
import pytest
from astropy.io.fits import Header
from astropy.wcs import WCS
from astropy.wcs.wcsapi.wrappers.sliced_wcs import sanitize_slices

HEADER_SPECTRAL_CUBE = """
CTYPE1  = GLAT-CAR
CTYPE2  = FREQ
CTYPE3  = GLON-CAR
CRVAL1  = 10
CRVAL2  = 20
CRVAL3  = 25
CRPIX1  = 30
CRPIX2  = 40
CRPIX3  = 45
CDELT1  = -0.1
CDELT2  =  0.5
CDELT3  =  0.1
CUNIT1  = deg
CUNIT2  = Hz
CUNIT3  = deg
"""

WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
WCS_SPECTRAL_CUBE.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]


@pytest.mark.parametrize(
    "item, ndim, expected",
    (([Ellipsis, 10], 4, [slice(None)] * 3 + [10]),
     ([10, slice(20, 30)], 5, [10, slice(20, 30)] + [slice(None)] * 3),
     ([10, Ellipsis, 8], 10, [10] + [slice(None)] * 8 + [8])))
def test_sanitize_slice(item, ndim, expected):
    new_item = sanitize_slices(item, ndim)
    # FIXME: do we still need the first two since the third assert
    # should cover it all?
    assert len(new_item) == ndim
    assert all(isinstance(i, (slice, int)) for i in new_item)
    assert new_item == expected
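The sanitize_slices helper under test is imported from elsewhere; a minimal reimplementation consistent with the three parametrized cases above might look as follows (a sketch for illustration, not astropy's actual code).

# Sketch of a sanitize_slices-style normalizer matching the test above.
def sanitize_slices_sketch(item, ndim):
    if not isinstance(item, (list, tuple)):
        item = [item]
    item = list(item)
    if Ellipsis in item:
        # Expand the Ellipsis into however many full slices are needed.
        pos = item.index(Ellipsis)
        n_fill = ndim - (len(item) - 1)
        item = item[:pos] + [slice(None)] * n_fill + item[pos + 1:]
    # Pad short items with full slices up to ndim entries.
    item += [slice(None)] * (ndim - len(item))
    return item

assert sanitize_slices_sketch([Ellipsis, 10], 4) == [slice(None)] * 3 + [10]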
Example #58
0
def process_file(filename, favor2=None, verbose=False, replace=False, dbname=None, dbhost=None, photodir='photometry'):
    #### Some parameters
    aper = 2.0
    bkgann = None
    order = 4
    bg_order = 4
    color_order = 2
    sn = 5

    if not posixpath.exists(filename):
        return None

    # Rough but fast checking of whether the file is already processed
    if not replace and posixpath.exists(photodir + '/' + filename.split('/')[-2] + '/' + posixpath.splitext(posixpath.split(filename)[-1])[0] + '.cat'):
        return

    #### Preparation
    header = fits.getheader(filename, -1)

    if header['TYPE'] not in ['survey', 'imaging', 'widefield', 'Swift', 'Fermi', 'test']:
        return

    channel = header.get('CHANNEL ID')
    fname = header.get('FILTER', 'unknown')
    time = parse_time(header['TIME'])
    shutter = header.get('SHUTTER', -1)

    if fname not in ['Clear']:
        return

    if fname == 'Clear':
        effective_fname = 'V'
    else:
        effective_fname = fname

    night = get_night(time)

    dirname = '%s/%s' % (photodir, night)
    basename = posixpath.splitext(posixpath.split(filename)[-1])[0]
    basename = dirname + '/' + basename
    catname = basename + '.cat'

    if not replace and posixpath.exists(catname):
        return

    if verbose:
        print(filename, channel, night, fname, effective_fname)

    image = fits.getdata(filename, -1).astype(np.double)

    if favor2 is None:
        favor2 = Favor2(dbname=dbname, dbhost=dbhost)

    #### Basic calibration
    darkname = favor2.find_image('masterdark', header=header, debug=False)
    flatname = favor2.find_image('masterflat', header=header, debug=False)

    if darkname:
        dark = fits.getdata(darkname)
    else:
        dark = None

    if flatname:
        flat = fits.getdata(flatname)
    else:
        flat = None

    if dark is None or flat is None:
        survey.save_objects(catname, None)
        return

    image,header = calibrate.calibrate(image, header, dark=dark)

    # Check whether the calibration failed
    if 'SATURATE' not in header:
        print('Calibration failed for', filename)
        survey.save_objects(catname, None)
        return

    #### Basic masking
    mask = image > 0.9*header['SATURATE']
    fmask = ~np.isfinite(flat) | (flat < 0.5)
    dmask = dark > 10.0*mad_std(dark) + np.nanmedian(dark)

    if header.get('BLEMISHCORRECTION', 1):
        # We have to mask blemished pixels
        blemish = fits.getdata('calibrations/blemish_shutter_%d_channel_%d.fits' % (header['SHUTTER'], header['CHANNEL ID']))
        if verbose:
            print(100*np.sum(blemish>0)/blemish.shape[0]/blemish.shape[1], '% pixels blemished')
        dmask |= blemish > 0

    image[~fmask] *= np.median(flat[~fmask])/flat[~fmask]

    #### WCS
    wcs = WCS(header)
    pixscale = np.hypot(wcs.pixel_scale_matrix[0,0], wcs.pixel_scale_matrix[0,1])
    gain = 0.67 if header.get('SHUTTER') == 0 else 1.9

    #### Background mask
    mask_bg = np.zeros_like(mask)
    mask_segm = np.zeros_like(mask)

    # bg2 = sep.Background(image, mask=mask|mask_bg, bw=64, bh=64)

    # for _ in xrange(3):
    #     bg1 = sep.Background(image, mask=mask|mask_bg, bw=256, bh=256)

    #     ibg = bg2.back() - bg1.back()

    #     tmp = np.abs(ibg - np.median(ibg)) > 5.0*mad_std(ibg)
    #     mask_bg |= survey.dilate(tmp, np.ones([50, 50]))

    # mask_bg = survey.dilate(tmp, np.ones([50, 50]))

    # Large objects?..
    bg = sep.Background(image, mask=mask|dmask|fmask|mask_bg|mask_segm, bw=128, bh=128)
    image1 = image - bg.back()
    obj0,segm = sep.extract(image1, err=bg.rms(), thresh=10, minarea=10, mask=mask|dmask|fmask|mask_bg, filter_kernel=None, clean=False, segmentation_map=True)

    mask_segm = np.isin(segm, [_+1 for _,npix in enumerate(obj0['npix']) if npix > 500])
    mask_segm = survey.dilate(mask_segm, np.ones([20, 20]))

    if np.sum(mask_bg|mask_segm|mask|fmask|dmask)/mask_bg.shape[0]/mask_bg.shape[1] > 0.4:
        print(100*np.sum(mask_bg|mask_segm|mask|fmask|dmask)/mask_bg.shape[0]/mask_bg.shape[1], '% of image masked, skipping', filename)
        survey.save_objects(catname, None)
        return
    elif verbose:
        print(100*np.sum(mask_bg|mask_segm|mask|fmask|dmask)/mask_bg.shape[0]/mask_bg.shape[1], '% of image masked')

    # Frame footprint at +10 pixels from the edge
    ra,dec = wcs.all_pix2world([10, 10, image.shape[1]-10, image.shape[1]-10], [10, image.shape[0]-10, image.shape[0]-10, 10], 0)
    footprint = "(" + ",".join(["(%g,%g)" % (_,__) for _,__ in zip(ra, dec)]) + ")"

    #### Catalogue
    ra0,dec0,sr0 = survey.get_frame_center(header=header)
    cat = favor2.get_stars(ra0, dec0, sr0, catalog='gaia', extra=['g<14', 'q3c_poly_query(ra, dec, \'%s\'::polygon)' % footprint], limit=1000000)

    if verbose:
        print(len(cat['ra']), 'star positions from Gaia down to g=%.1f mag' % np.max(cat['g']))

    ## Detection of blended and not really needed stars in the catalogue
    h = htm.HTM(10)
    m = h.match(cat['ra'], cat['dec'], cat['ra'], cat['dec'], 2.0*aper*pixscale, maxmatch=0)
    m = [_[m[2]>1e-5] for _ in m]

    blended = np.zeros_like(cat['ra'], dtype=bool)
    notneeded = np.zeros_like(cat['ra'], dtype=bool)

    for i1,i2,dist in zip(*m):
        if dist*3600 > 0.5*aper*pixscale:
            if cat['g'][i1] - cat['g'][i2] < 3:
                blended[i1] = True
                blended[i2] = True
            else:
                # i1 is fainter by more than 3 mag
                notneeded[i1] = True

        if dist*3600 < 0.5*aper*pixscale:
            if cat['g'][i1] > cat['g'][i2]:
                notneeded[i1] = True

    cat, blended = [_[~notneeded] for _ in (cat, blended)]

    #### Background subtraction
    bg = sep.Background(image, mask=mask|dmask|fmask|mask_bg|mask_segm, bw=128, bh=128)
    # bg = sep.Background(image, mask=mask|dmask|fmask|mask_bg|mask_segm, bw=32, bh=32)
    image1 = image - bg.back()

    #### Detection of all objects on the frame
    obj0,segm = sep.extract(image1, err=bg.rms(), thresh=2, minarea=3, mask=mask|dmask|fmask|mask_bg|mask_segm, filter_kernel=None, clean=False, segmentation_map=True)
    obj0 = obj0[(obj0['x'] > 10) & (obj0['y'] > 10) & (obj0['x'] < image.shape[1]-10) & (obj0['y'] < image.shape[0]-10)]
    obj0 = obj0[obj0['flag'] <= 1] # We keep only normal and blended objects

    fields = ['ra', 'dec', 'fluxerr', 'mag', 'magerr', 'flags', 'cat']
    obj0 = np.lib.recfunctions.append_fields(obj0, fields, [np.zeros_like(obj0['x'], dtype=int if _ in ['flags', 'cat'] else np.double) for _ in fields], usemask=False)
    obj0['ra'],obj0['dec'] = wcs.all_pix2world(obj0['x'], obj0['y'], 0)
    obj0['flags'] = obj0['flag']

    if verbose:
        print(len(obj0['x']), 'objects detected on the frame')

    ## Filter out objects not coincident with catalogue positions
    h = htm.HTM(10)
    m = h.match(obj0['ra'], obj0['dec'], cat['ra'], cat['dec'], aper*pixscale)

    nidx = np.isin(np.arange(len(obj0['ra'])), m[0], invert=True)
    obj0 = obj0[nidx]

    if verbose:
        print(len(obj0['x']), 'are outside catalogue apertures')

    # Catalogue stars
    xc,yc = wcs.all_world2pix(cat['ra'], cat['dec'], 0)
    obj = {'x':xc, 'y':yc, 'ra':cat['ra'], 'dec':cat['dec']}

    obj['flags'] = np.zeros_like(xc, dtype=int)
    obj['flags'][blended] |= FLAG_BLENDED

    obj['cat'] = np.ones_like(xc, dtype=int)

    for _ in ['mag', 'magerr', 'flux', 'fluxerr']:
        obj[_] = np.zeros_like(xc)

    # Merge detected objects
    for _ in ['x', 'y', 'ra', 'dec', 'flags', 'mag', 'magerr', 'flux', 'fluxerr', 'cat']:
        obj[_] = np.concatenate((obj[_], obj0[_]))

    if verbose:
        print(len(obj['x']), 'objects for photometry')

    # Simple aperture photometry
    obj['flux'],obj['fluxerr'],flag = sep.sum_circle(image1, obj['x'], obj['y'], aper, err=bg.rms(), gain=gain, mask=mask|dmask|fmask|mask_bg|mask_segm, bkgann=bkgann)
    obj['flags'] |= flag
    # Normalize flags
    obj['flags'][(obj['flags'] & sep.APER_TRUNC) > 0] |= FLAG_TRUNCATED
    obj['flags'][(obj['flags'] & sep.APER_ALLMASKED) > 0] |= FLAG_MASKED
    obj['flags'] &= FLAG_NORMAL | FLAG_BLENDED | FLAG_TRUNCATED | FLAG_MASKED | FLAG_NO_BACKGROUND | FLAG_BAD_CALIBRATION

    area,_,_ = sep.sum_circle(np.ones_like(image1), obj['x'], obj['y'], aper, err=bg.rms(), gain=gain, mask=mask|dmask|fmask|mask_bg|mask_segm, bkgann=bkgann)

    # Simple local background estimation
    bgflux,bgfluxerr,bgflag = sep.sum_circann(image1, obj['x'], obj['y'], 10, 15, err=bg.rms(), gain=gain, mask=mask|dmask|fmask|mask_bg|mask_segm|(segm>0))
    bgarea,_,_ = sep.sum_circann(np.ones_like(image1), obj['x'], obj['y'], 10, 15, err=bg.rms(), gain=gain, mask=mask|dmask|fmask|mask_bg|mask_segm|(segm>0))

    bgidx = np.isfinite(bgarea) & np.isfinite(area)
    bgidx[bgidx] &= (bgarea[bgidx] > 10) & (area[bgidx] > 1)

    obj['flux'][bgidx] -= bgflux[bgidx]*area[bgidx]/bgarea[bgidx]
    obj['flags'][~bgidx] |= FLAG_NO_BACKGROUND # No local background

    obj['deltabgflux'] = np.zeros_like(obj['x'])
    obj['deltabgflux'][bgidx] = bgflux[bgidx]*area[bgidx]/bgarea[bgidx]

    fidx = np.isfinite(obj['flux']) & np.isfinite(obj['fluxerr'])
    fidx[fidx] &= (obj['flux'][fidx] > 0)

    obj['mag'][fidx] = -2.5*np.log10(obj['flux'][fidx])
    obj['magerr'][fidx] = 2.5/np.log(10)*obj['fluxerr'][fidx]/obj['flux'][fidx]

    fidx[fidx] &= (obj['magerr'][fidx] > 0)
    fidx[fidx] &= 1/obj['magerr'][fidx] > sn

    for _ in obj.keys():
        if hasattr(obj[_], '__len__'):
            obj[_] = obj[_][fidx]

    obj['aper'] = aper

    if verbose:
        print(len(obj['x']), 'objects with S/N >', sn)

    if len(obj['x']) < 1000:
        print('Only', len(obj['x']), 'objects on the frame, skipping', filename)
        survey.save_objects(catname, None)
        return

    obj['fwhm'] = 2.0*sep.flux_radius(image1, obj['x'], obj['y'], 2.0*aper*np.ones_like(obj['x']), 0.5, mask=mask|dmask|fmask|mask_bg|mask_segm)[0]

    #### Check FWHM of all objects and select only 'good' ones
    idx = obj['flags'] == 0
    idx &= obj['magerr'] < 1/20

    fwhm0 = survey.fit_2d(obj['x'][idx], obj['y'][idx], obj['fwhm'][idx], obj['x'], obj['y'], weights=1/obj['magerr'][idx])

    fwhm_idx = np.abs(obj['fwhm'] - fwhm0 - np.median((obj['fwhm'] - fwhm0)[idx])) < 3.0*mad_std((obj['fwhm'] - fwhm0)[idx])
    obj['flags'][~fwhm_idx] |= FLAG_BLENDED

    #### Catalogue matching
    idx = obj['flags']
    m = htm.HTM(10).match(obj['ra'], obj['dec'], cat['ra'], cat['dec'], 1e-5)
    fidx = np.in1d(np.arange(len(cat['ra'])), m[1]) # Stars that got successfully measured and not blended

    cidx = (cat['good'] == 1) & (cat['var'] == 0)
    cidx &= np.isfinite(cat['B']) & np.isfinite(cat['V']) # & np.isfinite(cat['lum'])
    cidx[cidx] &= ((cat['B'] - cat['V'])[cidx] > -0.5) & ((cat['B'] - cat['V'])[cidx] < 2.0)
    # cidx[cidx] &= (cat['lum'][cidx] > 0.3) & (cat['lum'][cidx] < 30)

    if np.sum(cidx & fidx & (cat['multi_70'] == 0)) > 2000:
        cidx &= (cat['multi_70'] == 0)
        obj['cat_multi'] = 70
    elif np.sum(cidx & fidx & (cat['multi_45'] == 0)) > 1000:
        cidx &= (cat['multi_45'] == 0)
        obj['cat_multi'] = 45
    else:
        cidx &= (cat['multi_30'] == 0)
        obj['cat_multi'] = 30

    if verbose:
        print(np.sum(obj['flags'] == 0), 'objects without flags')
        print('Amount of good stars:',
              np.sum(cidx & fidx & (cat['multi_70'] == 0)),
              np.sum(cidx & fidx & (cat['multi_45'] == 0)),
              np.sum(cidx & fidx & (cat['multi_30'] == 0)))
        print('Using %d arcsec avoidance radius' % obj['cat_multi'])

    # We match with very small SR to only account for manually placed apertures
    if verbose:
        print('Trying full fit:', len(obj['x']), 'objects,', np.sum(cidx), 'stars')

    match = Match(width=image.shape[1], height=image.shape[0])

    prev_ngoodstars = len(obj['x'])

    for iter in range(10):
        if not match.match(obj=obj, cat=cat[cidx], sr=1e-5, filter_name='V', order=order, bg_order=bg_order, color_order=color_order, verbose=False) or match.ngoodstars < 500:
            if verbose:
                print(match.ngoodstars, 'good matches, matching failed for', filename)
            survey.save_objects(catname, None)
            return

        if verbose:
            print(match.ngoodstars, 'good matches, std =', match.std)

        if match.ngoodstars == prev_ngoodstars:
            if verbose:
                print('Converged on iteration', iter)
            break
        prev_ngoodstars = match.ngoodstars

        # Match good objects with stars
        oidx = obj['flags'] == 0
        oidx1,cidx1,dist1 = htm.HTM(10).match(obj['ra'][oidx], obj['dec'][oidx], cat['ra'][cidx], cat['dec'][cidx], 1e-5)

        x = obj['x'][oidx][oidx1]
        y = obj['y'][oidx][oidx1]
        cbv = match.color_term[oidx][oidx1]
        cbv2 = match.color_term2[oidx][oidx1]
        cbv3 = match.color_term3[oidx][oidx1]
        bv = (cat['B'] - cat['V'])[cidx][cidx1]
        cmag = cat[match.cat_filter_name][cidx][cidx1]
        mag = match.mag[oidx][oidx1] + bv*cbv + bv**2*cbv2 + bv**3*cbv3
        magerr = np.hypot(obj['magerr'][oidx][oidx1], 0.02)

        dmag = mag-cmag
        ndmag = ((mag-cmag)/magerr)

        idx = cmag < match.mag_limit[oidx][oidx1]

        x,y,cbv,cbv2,cbv3,bv,cmag,mag,magerr,dmag,ndmag = [_[idx] for _ in [x,y,cbv,cbv2,cbv3,bv,cmag,mag,magerr,dmag,ndmag]]

        # Match all objects with good objects
        xy = np.array([x,y]).T
        xy0 = np.array([obj['x'], obj['y']]).T

        kd = cKDTree(xy)

        dist,m = kd.query(xy0, 101)
        dist = dist[:,1:]
        m = m[:,1:]

        vchi2 = mad_std(ndmag[m]**2, axis=1)

        # Mark regions of too sparse or too noisy matches as bad
        obj['flags'][vchi2 > 5] |= FLAG_BAD_CALIBRATION
        # obj['flags'][vchi2 > np.median(vchi2) + 5.0*mad_std(vchi2)] |= FLAG_BAD_CALIBRATION
        obj['flags'][dist[:,10] > np.median(dist[:,10]) + 10.0*mad_std(dist[:,10])] |= FLAG_BAD_CALIBRATION

    match.good_idx = (obj['flags'] & FLAG_BAD_CALIBRATION) == 0
    if verbose:
        print(np.sum(match.good_idx), 'of', len(match.good_idx), 'stars are good')

    #### Store objects to file
    try:
        os.makedirs(dirname)
    except OSError:
        pass

    obj['mag_limit'] = match.mag_limit
    obj['color_term'] = match.color_term
    obj['color_term2'] = match.color_term2
    obj['color_term3'] = match.color_term3

    obj['filename'] = filename
    obj['night'] = night
    obj['channel'] = channel
    obj['filter'] = fname
    obj['cat_filter'] = match.cat_filter_name
    obj['time'] = time

    obj['mag_id'] = match.mag_id

    obj['good_idx'] = match.good_idx
    obj['calib_mag'] = match.mag
    obj['calib_magerr'] = match.magerr

    obj['std'] = match.std
    obj['nstars'] = match.ngoodstars

    survey.save_objects(catname, obj, header=header)
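The pipeline above leans on sep for background estimation, source extraction, and aperture sums. The following is a compact, self-contained sketch of that sequence on synthetic data; the injected star and all numeric choices are illustrative, not values from the pipeline above.

# Self-contained sep sketch: background, extraction, aperture photometry.
import numpy as np
import sep

rng = np.random.default_rng(0)
image = rng.normal(0.0, 1.0, (256, 256))

# Inject one fake Gaussian star so that something is detectable.
yy, xx = np.mgrid[0:256, 0:256]
image += 50.0 * np.exp(-((xx - 128.0)**2 + (yy - 128.0)**2) / (2.0 * 2.0**2))

bkg = sep.Background(image)            # spatially varying background model
image_sub = image - bkg.back()         # background-subtracted frame

# 5-sigma detection threshold relative to the global background RMS
objects = sep.extract(image_sub, thresh=5.0, err=bkg.globalrms)

# Simple circular-aperture photometry, as in the pipeline above
flux, fluxerr, flag = sep.sum_circle(image_sub, objects['x'], objects['y'],
                                     3.0, err=bkg.globalrms)
print(len(objects), 'objects; flux of first:', flux[0])
Example #59
0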
def mkpy3_finder_chart_survey_fits_image_get_v1(
  ra_deg=None,
  dec_deg=None,
  radius_arcmin=None,
  survey=None,
  cframe=None,
  verbose=None
):
    """
Function: mkpy3_finder_chart_survey_fits_image_get_v1()

Purpose:

Gets sky survey image data around a position on the sky.

Parameters
----------
ra_deg : float (optional)
    right ascension [deg]
dec_deg : float (optional)
    declination [deg]
radius_arcmin : float (optional)
    radius (halfwidth and halfheight of image) [arcmin]
survey : string (optional) [e.g., '2MASS-J', 'DSS2 Red', etc.]
    survey string name
cframe : str (optional)
    coordinate frame name [e.g., 'fk5', 'icrs', etc.]
verbose : bool (optional)
    if True, print extra information

Returns
-------
hdu :
    Header/Data Unit (HDU) of the survey FITS file
hdr :
    header associated with hdu
data :
    data associated with hdu
wcs :
    World Coordinate System from hdu
cframe :
    coordinate frame of the survey data

# Kenneth Mighell
# Kepler Support Scientist
# NASA Ames Research Center / SETI Institute
    """
    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from astroquery.skyview import SkyView
    from astropy.wcs import WCS
    #
    if (ra_deg is None):
        ra_deg = 291.41829  # Kepler-93b
    if (dec_deg is None):
        dec_deg = 38.67236  # Kepler-93b
    if (radius_arcmin is None):
        radius_arcmin = 1.99
    if (survey is None):
        survey = '2MASS-J'  # alternate: 'DSS2 Red'
        # ^--- to see all surveys: astroquery.skyview.SkyView.list_surveys()
    # pass:if
    if (cframe is None):
        cframe = 'fk5'  # N.B.: '2MASS-J' uses 'fk5'
    if (verbose is None):
        verbose = False
    #
    if (verbose):
        print(ra_deg, '=ra_deg')
        print(dec_deg, '=dec_deg')
        print(radius_arcmin, '=radius_arcmin')
        print("'%s' =survey" % (survey))
        print("'%s' =cframe" % (cframe))
        print(verbose, '=verbose')
        print()
    # pass#if
    #
    # sc <--- astropy sky coordinates
    sc = SkyCoord(ra=ra_deg * u.degree, dec=dec_deg * u.degree, frame=cframe)
    # image list  # assume that the list contains a single image
    imgl = SkyView.get_images(position=sc, survey=survey, radius=radius_arcmin * u.arcmin)
    #
    # outputs:
    hdu = imgl[0]        # Header/Data Unit of the FITS image
    hdr = hdu[0].header  # header associated with the HDU
    data = hdu[0].data   # data associated with the HDU
    wcs = WCS(hdr)       # World Coordinate System from the FITS header of the survey image
    #
    return hdu, hdr, data, wcs, cframe
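A quick usage sketch for the function above; the call goes through astroquery's SkyView and therefore needs network access, so the values below are purely illustrative.

# Illustrative call of the function above (requires network access).
hdu, hdr, data, wcs, cframe = mkpy3_finder_chart_survey_fits_image_get_v1(
    ra_deg=291.41829, dec_deg=38.67236, radius_arcmin=1.99,
    survey='2MASS-J', cframe='fk5', verbose=True)
print(data.shape, cframe)
Example #60
0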
    def test_overlay_coords(self, ignore_matplotlibrc, tmpdir):
        wcs = WCS(self.msx_header)

        fig = plt.figure(figsize=(4, 4))
        canvas = fig.canvas

        ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)
        fig.add_axes(ax)

        # On some systems, fig.canvas.draw is not enough to force a draw, so we
        # save to a temporary file.
        fig.savefig(tmpdir.join('test1.png').strpath)

        # Testing default displayed world coordinates
        string_world = ax._display_world_coords(0.523412, 0.518311)
        assert string_world == '0\xb029\'45" -0\xb029\'20" (world)'

        # Test pixel coordinates
        event1 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event1.key, guiEvent=event1)
        string_pixel = ax._display_world_coords(0.523412, 0.523412)
        assert string_pixel == "0.523412 0.523412 (pixel)"

        event3 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event3.key, guiEvent=event3)
        # Test that it still displays world coords when there are no overlay coords
        string_world2 = ax._display_world_coords(0.523412, 0.518311)
        assert string_world2 == '0\xb029\'45" -0\xb029\'20" (world)'

        overlay = ax.get_coords_overlay('fk5')

        # Regression test for bug that caused format to always be taken from
        # main world coordinates.
        overlay[0].set_major_formatter('d.ddd')

        # On some systems, fig.canvas.draw is not enough to force a draw, so we
        # save to a temporary file.
        fig.savefig(tmpdir.join('test2.png').strpath)

        event4 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event4.key, guiEvent=event4)
        # Test that it displays the overlay world coordinates
        string_world3 = ax._display_world_coords(0.523412, 0.518311)

        assert string_world3 == '267.176\xb0 -28\xb045\'56" (world, overlay 1)'

        overlay = ax.get_coords_overlay(FK5())

        # Regression test for bug that caused format to always be taken from
        # main world coordinates.
        overlay[0].set_major_formatter('d.ddd')

        # On some systems, fig.canvas.draw is not enough to force a draw, so we
        # save to a temporary file.
        fig.savefig(tmpdir.join('test3.png').strpath)

        event5 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event5.key, guiEvent=event5)
        # Test that it displays the overlay world coordinates
        string_world4 = ax._display_world_coords(0.523412, 0.518311)

        assert string_world4 == '267.176\xb0 -28\xb045\'56" (world, overlay 2)'

        overlay = ax.get_coords_overlay(FK5(equinox=Time("J2030")))

        # Regression test for bug that caused format to always be taken from
        # main world coordinates.
        overlay[0].set_major_formatter('d.ddd')

        # On some systems, fig.canvas.draw is not enough to force a draw, so we
        # save to a temporary file.
        fig.savefig(tmpdir.join('test4.png').strpath)

        event6 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event6.key, guiEvent=event6)
        # Test that it displays the overlay world coordinates
        string_world5 = ax._display_world_coords(0.523412, 0.518311)

        assert string_world5 == '267.652\xb0 -28\xb046\'23" (world, overlay 3)'