Example No. 1
def _generate_wcs_and_update_header(hdr):
    """
    Generate a WCS object from a header and remove the WCS-specific
    keywords from the header.

    Parameters
    ----------

    hdr : astropy.io.fits.Header or other dict-like

    Returns
    -------

    new_header, wcs
    """

    # Try constructing a WCS object.
    try:
        wcs = WCS(hdr)
    except Exception as exc:
        # Normally WCS only raises Warnings and doesn't fail but in rare
        # cases (malformed header) it could fail...
        log.info(
            'An exception happened while extracting WCS information from '
            'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
        return hdr, None
    # Test for success by checking to see if the wcs ctype has a non-empty
    # value, return None for wcs if ctype is empty.
    if not wcs.wcs.ctype[0]:
        return (hdr, None)

    new_hdr = hdr.copy()
    # If the keywords below are in the header they are also added to WCS.
    # It seems like they should *not* be removed from the header, though.

    wcs_header = wcs.to_header(relax=True)
    for k in wcs_header:
        if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
            new_hdr.remove(k, ignore_missing=True)

    # Check that this does not result in an inconsistent header WCS if the WCS
    # is converted back to a header.

    if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
        # The PCi_j representation is used by the astropy.wcs object,
        # so CDi_j keywords were not removed from new_hdr. Remove them now.
        for cd in _CDs:
            new_hdr.remove(cd, ignore_missing=True)

    # The other case -- CD in the header produced by astropy.wcs -- should
    # never happen based on [1], which computes the matrix in PC form.
    # [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
    #
    # The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
    # check for the possibility that both PC and CD are present in the result
    # so if the implementation of to_header changes in wcslib in the future
    # then the tests should catch it, and then this code will need to be
    # updated.

    # We need to check for any SIP coefficients that got left behind if the
    # header has SIP.
    if wcs.sip is not None:
        keyword = '{}_{}_{}'
        polynomials = ['A', 'B', 'AP', 'BP']
        for poly in polynomials:
            order = getattr(wcs.sip, f'{poly.lower()}_order')
            for i, j in itertools.product(range(order), repeat=2):
                new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True)

    return (new_hdr, wcs)
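A minimal usage sketch for the helper above; the FITS file name is hypothetical, and the module-level names the function relies on (log, _KEEP_THESE_KEYWORDS_IN_HEADER, _PCs, _CDs, itertools) are assumed to be defined as in the surrounding module.

from astropy.io import fits

hdr = fits.getheader('example_image.fits')  # hypothetical file
new_hdr, wcs = _generate_wcs_and_update_header(hdr)
if wcs is None:
    print('No usable WCS could be extracted from the header')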
Example No. 2
# Here we create a WCS based on a heliographic
# Stonyhurst reference coordinate and with the CAR (plate carree) projection.

shape_out = [720, 1440]
frame_out = SkyCoord(0,
                     0,
                     unit=u.deg,
                     frame="heliographic_stonyhurst",
                     obstime=aia_map.date)
header = sunpy.map.make_fitswcs_header(
    shape_out,
    frame_out,
    scale=[180 / shape_out[0], 360 / shape_out[1]] * u.deg / u.pix,
    projection_code="CAR")

out_wcs = WCS(header)

###############################################################################
# With the new header, re-project the data into the new coordinate system.
# Here we are using the fastest but least accurate method of reprojection,
# `reproject.reproject_interp`; a more accurate but slower method is
# `reproject.reproject_adaptive`.

array, footprint = reproject_interp(aia_map, out_wcs, shape_out=shape_out)
outmap = sunpy.map.Map((array, header))
outmap.plot_settings = aia_map.plot_settings

###############################################################################
# Plot the result.

fig = plt.figure()
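The plotting code is cut off here; a possible continuation (a sketch, not necessarily the original) draws the reprojected map on WCS axes.

ax = plt.subplot(projection=out_wcs)
outmap.plot(axes=ax)
plt.show()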
Example No. 3
def test_default_rotation(map_data, hpc_coord):
    header = make_fitswcs_header(map_data, hpc_coord)
    wcs = WCS(header)
    np.testing.assert_allclose(wcs.wcs.pc, [[1, 0], [0, 1]], atol=1e-5)
Example No. 4
def test_wcs():
    ccd_data = create_ccd_data()
    wcs = WCS(naxis=2)
    ccd_data.wcs = wcs
    assert ccd_data.wcs is wcs
Example No. 5
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.io import fits
from astropy.wcs import WCS
from gammapy.maps import Map, WcsNDMap, WcsGeom
import os

# Warnings about XSPEC or DS9 can be ignored here
import sherpa.astro.ui as sh

# In[ ]:

# Read the fits file to load them in a sherpa model
filecounts = os.environ["GAMMAPY_DATA"] + "/sherpaCTA/G300-0_test_counts.fits"
hdr = fits.getheader(filecounts)
wcs = WCS(hdr)

sh.set_stat("cash")
sh.set_method("simplex")
sh.load_image(filecounts)
sh.set_coord("logical")

fileexp = os.environ["GAMMAPY_DATA"] + "/sherpaCTA/G300-0_test_exposure.fits"
filebkg = os.environ["GAMMAPY_DATA"] + "/sherpaCTA/G300-0_test_background.fits"
filepsf = os.environ["GAMMAPY_DATA"] + "/sherpaCTA/G300-0_test_psf.fits"
sh.load_table_model("expo", fileexp)
sh.load_table_model("bkg", filebkg)
sh.load_psf("psf", filepsf)

# In principle one might first want to fit the background amplitude. However,
# the background estimation method already yields the correct normalization,
# so we freeze the background amplitude to unity instead of adjusting it. The
# (smoothed) residuals from this background model are then computed and shown.
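A short sketch of the freezing step described in the comment above, assuming the table model loaded as "bkg" exposes the usual ampl parameter.

bkg_model = sh.get_model_component("bkg")
bkg_model.ampl = 1.0       # background normalization taken as already correct
sh.freeze(bkg_model.ampl)  # keep it fixed during the source fit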
Example No. 6
    infile = sys.argv[1]
except:
    "Please provide a configuration file"
    sys.exit(1)

print()
config = get_config(infile)
tsfile = config['out'] + '/' + config['target']['name'] + '_' + config['file'][
    'tag'] + "_TSMap.fits"

tsimage = dict(label='TSMap', filename=tsfile)

# Determine image center and width / height
dpi = 2000
header = fits.getheader(tsimage['filename'])
wcs = WCS(header)
header['NAXIS1'] / dpi
header['NAXIS2'] / dpi
lon, lat = header['NAXIS1'] / 2., header['NAXIS2'] / 2.
x_center, y_center = wcs.wcs_pix2world(lon, lat, 0)
radius = header['CDELT2'] * header['NAXIS2'] / 2.

# Computing the sub-figure sizes is surprisingly hard
figsize = (5, 5)
figure = mpl.figure(figsize=figsize)

f = FITSFigure(tsimage['filename'], figure=figure)
f.recenter(x_center, y_center, 0.95 * radius)
set_hgps_style(f)
f.show_colorscale(vmin=1e-5, vmax=vmax, stretch=st[0], exponent=1,
                  cmap='jet')  #vmid=-3, stretch='log', )
Example No. 7
CRVAL5  =                   {stime:f} / Should be an AIPS time
CDELT5  =                  {dtime:f}'''.format(nant=Nantenna, ntimes=Ntimes, ra=np.rad2deg(phasecenter[0]), dec=np.rad2deg(phasecenter[1]), cfreq=150e6, dfreq=100e6, stime=stime, dtime=dtime)

# 4ch/4s data
# Frequency has length one, since it is TEC.
# FITS is transposed compared to Numpy.
data = np.zeros((Ntimes, 1, Nantenna, 256, 256), dtype=np.float32)

# Read in h5parm.
h5 = h5parm.h5parm(h5p)
ss = h5.getSolset('sol000')
st = ss.getSoltab('tec000')
h5_stations = list(st.getAxisValues('ant'))
# Find nearest pixel for a given direction.
H = fits.Header.fromstring(header, sep='\n')
wcs = WCS(H).celestial
directions = list(st.getAxisValues('dir'))
print(directions)
sources = ss.getSou()
RA = []
DEC = []
TEC = st.getValues()[0]
dirs = []
print(TEC.shape)
for d in directions:
    c = sources[d]
    diridx = directions.index(d)
    dirs.append(d)
    RAd, DECd = np.rad2deg(c)
    RA.append(RAd)
    DEC.append(DECd)
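The snippet stops before the pixel lookup announced by the comment above; a hedged sketch of that step using the celestial WCS built from the header string (numpy is assumed to be imported as np, as elsewhere in this snippet).

xpix, ypix = wcs.wcs_world2pix(np.array(RA), np.array(DEC), 0)
xpix = np.round(xpix).astype(int)
ypix = np.round(ypix).astype(int)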
Example No. 8
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from matplotlib.patches import Rectangle

# importing data etc

fits_dir = "gass_smallest.fits"
hdu = fits.open(fits_dir)

hdr = hdu[0].header
data = hdu[0].data

fits.setval(fits_dir, "CUNIT1", value="deg")
fits.setval(fits_dir, "CUNIT2", value="deg")

wcs = WCS(hdr, naxis=2)

noise = np.zeros_like(data[0])

# noise is the average of brightness temperature from index 0 to 50, at each pixel (array)

for z_ind in range(0, len(data[:50])):
    noise += np.absolute(data[z_ind, :, :]) / len(range(0, len(data[:50])))

# sets all elements less than 3 times the noise to zero.

data[data < 3.0 * noise] = 0

# column density map: N = 1.823*(10**18) * sum(data) * dv (km/s)

dv = hdr["CDELT3"] / 1000
Example No. 9
def gpf(infile, mask_impath,
        aperture):  # aperture in arcmin and infile=cen_list

    ts = time()

    mask = fits.open(mask_impath)  # mask image
    w = WCS(mask_impath)  # wcs
    imdata = mask[0].data  # mask data
    """CALCULATING THE PIXEL SCALE [pixels/arcmin]"""
    # e.g. -2.3979657626886E-4 degrees per pixel along the X axis
    xscale = abs(mask[0].header['CD1_1'])
    # e.g. 2.39763413722684E-4 degrees per pixel along the Y axis
    yscale = abs(mask[0].header['CD2_2'])
    mask.close()
    xpixarcmin = (1 / (60 * xscale))  # pixels in 1 arcmin along the X axis
    ypixarcmin = (1 / (60 * yscale))  # pixels in 1 arcmin along the Y axis
    pix_arcmin = (xpixarcmin +
                  ypixarcmin) / 2.0  # average no. of pixels in 1 arcmin
    aperture = aperture * pix_arcmin  # converted the aperture in pixels
    print("\n" + '1 arcmin   = ' + str(pix_arcmin) + ' pixels' + "\n")
    print("aperture = " + str(aperture) + " pixels" + "\n")
    """CONVERTING Catalog COORDINATES INTO PIXELS"""
    pix_cat = np.zeros((len(infile), 3),
                       dtype=np.float64)  # col1=id, col2=xpix,col3=ypix
    for k in range(len(infile)):
        # origin=0 gives zero-based pixel coordinates for the catalog sources.
        x, y = w.wcs_world2pix(infile['ra'][k], infile['dec'][k], 0)
        pix_cat[k, 1] = x
        pix_cat[k, 2] = y

    pix_cat[:, 0] = np.arange(1, len(infile) + 1, 1, dtype=int)
    """DRAWING BOXES AROUND EACH centroid"""
    limit = np.zeros((len(pix_cat), 4), dtype=int)
    for i in range(len(pix_cat)):
        cmin = int(round(pix_cat[i, 1] - aperture))  # obj,ra  (in pixels) - aper
        cmax = int(round(pix_cat[i, 1] + aperture))  # obj,ra  (in pixels) + aper
        rmin = int(round(pix_cat[i, 2] - aperture))  # obj,dec (in pixels) - aper
        rmax = int(round(pix_cat[i, 2] + aperture))  # obj,dec (in pixels) + aper

        limit[i, :] = [cmin, cmax, rmin, rmax]
    """ITERATING OVER THE BOX TO CALCULATE GOOD PIXELS WITHIN THE APERTURE OF THE centroids"""
    n_pix = np.zeros((len(pix_cat), 2),
                     dtype=int)  # [col1->total pixels] & [col2->good pixels]
    for i in range(len(pix_cat)):
        if i % 50 == 0:
            print(i)
        for c in range(limit[i, 0], limit[i, 1] + 1):  # accessing the corresponding box
            for r in range(limit[i, 2], limit[i, 3] + 1):
                # distance of the pixel from the centroid
                dist = np.sqrt((pix_cat[i, 1] - c)**2 + (pix_cat[i, 2] - r)**2)
                if dist <= aperture:
                    # counting total pixels inside the given aperture
                    n_pix[i, 0] = n_pix[i, 0] + 1
                    if imdata[r, c] == 0:  # checking if the pixel is good or bad
                        n_pix[i, 1] = n_pix[i, 1] + 1  # counting good pixels
    """CALCULATING RATIO OF GOOD PIXELS TO TOTAL PIXELS: GPF """
    ratio = []
    for i in range(len(n_pix)):
        ratio.append(n_pix[i, 1] / float(n_pix[i, 0]))

    print(time() - ts)
    return pix_cat, n_pix, ratio
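A hypothetical call to gpf (the file names are illustrative only): infile needs 'ra' and 'dec' columns and the aperture is given in arcmin, as the comments above state.

from astropy.table import Table

cen_list = Table.read('centroids.fits')  # hypothetical catalog with 'ra'/'dec' columns
pix_cat, n_pix, ratio = gpf(cen_list, 'mask.fits', aperture=2.0)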
Example No. 10
def plot_finder_image(
    target,
    survey="DSS2 Red",
    fov_radius=30 * u.arcmin,
    fov=None,
    log=False,
    ax=None,
    grid=False,
    reticle=False,
    style_kwargs=None,
    reticle_style_kwargs=None,
    pixels=500,
    inverted=False,
    stf=True,
):
    """
    Plot survey image centered on ``target``.

    Survey images are retrieved from NASA Goddard's SkyView service via
    ``astroquery.skyview.SkyView``.

    If a `~matplotlib.axes.Axes` object already exists, plots the finder image
    on top. Otherwise, creates a new `~matplotlib.axes.Axes`
    object with the finder image.

    Parameters
    ----------
    target : `~astroplan.FixedTarget`, `~astropy.coordinates.SkyCoord`
        Coordinates of celestial object

    survey : string
        Name of survey to retrieve image from. For a dictionary of
        available surveys, use
        ``from astroquery.skyview import SkyView; SkyView.list_surveys()``.
        Defaults to ``'DSS2 Red'``.

    fov_radius : `~astropy.units.Quantity`
        Radius of field of view of retrieved image. Defaults to 30 arcmin.

    log : bool, optional
        Take the natural logarithm of the FITS image if `True`.
        False by default.

    ax : `~matplotlib.axes.Axes` or None, optional.
        The `~matplotlib.axes.Axes` object to be drawn on.
        If None, uses the current `~matplotlib.axes.Axes`.

    grid : bool, optional.
        Grid is drawn if `True`. `False` by default.

    reticle : bool, optional
        Draw reticle on the center of the FOV if `True`. Default is `False`.

    style_kwargs : dict or `None`, optional.
        A dictionary of keywords passed into `~matplotlib.pyplot.imshow`
        to set plotting styles.

    reticle_style_kwargs : dict or `None`, optional
        A dictionary of keywords passed into `~matplotlib.pyplot.axvline` and
        `~matplotlib.pyplot.axhline` to set reticle style.

    Returns
    -------
    ax : `~matplotlib.axes.Axes`
        Matplotlib axes with survey image centered on ``target``

    hdu : `~astropy.io.fits.PrimaryHDU`
        FITS HDU of the retrieved image


    Notes
    -----
    Dependencies:
        In addition to Matplotlib, this function makes use of astroquery and WCSAxes.
    """

    import matplotlib.pyplot as plt
    from astroquery.skyview import SkyView

    coord = target if not hasattr(target, "coord") else target.coord
    position = coord.icrs
    coordinates = "icrs"
    target_name = None if isinstance(target, SkyCoord) else target.name

    if fov:
        width, height = fov
        hdu = SkyView.get_images(
            position=position,
            coordinates=coordinates,
            survey=survey,
            width=width,
            height=height,
            grid=grid,
            pixels=pixels,
        )[0][0]
    else:
        hdu = SkyView.get_images(
            position=position,
            coordinates=coordinates,
            survey=survey,
            radius=fov_radius,
            grid=grid,
            pixels=pixels,
        )[0][0]

    wcs = WCS(hdu.header)

    # Set up axes & plot styles if needed.
    if ax is None:
        ax = plt.subplot(projection=wcs)
    if style_kwargs is None:
        style_kwargs = {}
    style_kwargs = dict(style_kwargs)
    if inverted:
        style_kwargs.setdefault("cmap", "Greys")
    else:
        style_kwargs.setdefault("cmap", "Greys_r")
    style_kwargs.setdefault("origin", "lower")

    if stf:
        image_data = auto_stf(hdu.data)
        style_kwargs.setdefault("vmin", 0)
        style_kwargs.setdefault("vmax", 1)

    elif log:
        image_data = np.log(hdu.data)
    else:
        image_data = hdu.data
    ax.imshow(image_data, **style_kwargs)

    # Draw reticle
    if reticle:
        pixel_width = image_data.shape[0]
        inner, outer = 0.03, 0.08

        if reticle_style_kwargs is None:
            reticle_style_kwargs = {}
        reticle_style_kwargs.setdefault("linewidth", 2)
        reticle_style_kwargs.setdefault("color", "m")

        ax.axvline(x=0.5 * pixel_width,
                   ymin=0.5 + inner,
                   ymax=0.5 + outer,
                   **reticle_style_kwargs)
        ax.axvline(x=0.5 * pixel_width,
                   ymin=0.5 - inner,
                   ymax=0.5 - outer,
                   **reticle_style_kwargs)
        ax.axhline(y=0.5 * pixel_width,
                   xmin=0.5 + inner,
                   xmax=0.5 + outer,
                   **reticle_style_kwargs)
        ax.axhline(y=0.5 * pixel_width,
                   xmin=0.5 - inner,
                   xmax=0.5 - outer,
                   **reticle_style_kwargs)

    # Labels, title, grid
    ax.set(xlabel="RA", ylabel="DEC")
    if target_name is not None:
        ax.set_title(target_name)
    ax.grid(grid)

    # Redraw the figure for interactive sessions.
    ax.figure.canvas.draw()
    return ax, hdu
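A brief usage sketch (the target choice is arbitrary; FixedTarget.from_name resolves the name via an online lookup).

from astroplan import FixedTarget
import astropy.units as u

target = FixedTarget.from_name('M31')
ax, hdu = plot_finder_image(target, fov_radius=20 * u.arcmin, reticle=True)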
Example No. 11
def catalog_image(reference,
                  psf,
                  catalog='1FHL',
                  source_type='point',
                  total_flux=False,
                  sim_table=None):
    """Creates an image from a simulated catalog, or from 1FHL or 2FGL sources.

    Parameters
    ----------
    reference : `~fits.ImageHDU`
        Reference Image HDU. The output takes the shape and resolution of this.
    psf : `~gammapy.irf.EnergyDependentTablePSF`
        Energy dependent Table PSF object for image convolution.
    catalog : {'1FHL', '2FGL', 'simulation'}
        Flag which source catalog is to be used to create the image.
        If 'simulation' is used, sim_table must also be provided.
    source_type : {'point', 'extended', 'all'}
        Specify whether point or extended sources should be included, or both.
        TODO: Currently only 'point' is implemented.
    total_flux : bool
        Specify whether to conserve total flux.
    sim_table : `~astropy.table.Table`
        Table of simulated point sources. Only required if catalog = 'simulation'

    Returns
    -------
    out_cube : `~gammapy.data.SpectralCube`
        2D Spectral cube containing the image.

    Notes
    -----
    This is currently only implemented for a single energy band.
    """
    from scipy.ndimage import convolve
    # This import is here instead of at the top to avoid an ImportError
    # due to circular dependencies
    from ..data import SpectralCube

    lons, lats = coordinates(reference)
    wcs = WCS(reference.header)
    # Uses dummy energy for now to construct spectral cube
    # TODO : Fix this hack
    reference_cube = SpectralCube(data=Quantity(np.array(reference.data), ''),
                                  wcs=wcs,
                                  energy=Quantity([0, 1], 'GeV'))

    if source_type == 'extended':
        raise NotImplementedError
        # TODO: Currently fluxes are not correct for extended sources.
        new_image = _extended_image(catalog, reference_cube)

    elif source_type == 'point':
        new_image, energy = _source_image(catalog, reference_cube, sim_table,
                                          total_flux)

    elif source_type == 'all':
        raise NotImplementedError
        # TODO: Currently Extended Sources do not work
        extended = _extended_image(catalog, reference_cube)
        point_source = _source_image(catalog, reference_cube,
                                     total_flux=True)[0]
        new_image = extended + point_source

    else:
        raise ValueError

    total_point_image = SpectralCube(data=new_image, wcs=wcs, energy=energy)
    convolved_cube = new_image.copy()

    psf = psf.table_psf_in_energy_band(
        Quantity(
            [np.min(energy).value, np.max(energy).value], energy.unit))

    resolution = abs(reference.header['CDELT1'])

    kernel_array = psf.kernel(pixel_size=Angle(resolution, 'deg'),
                              offset_max=Angle(5, 'deg'),
                              normalize=True)

    convolved_cube = convolve(new_image, kernel_array, mode='constant')

    out_cube = SpectralCube(data=Quantity(convolved_cube, ''),
                            wcs=total_point_image.wcs,
                            energy=energy)

    return out_cube
Example No. 12
    def wcs(self):
        return WCS(self.header)
Example No. 13
def main(args):
    
    ## make timeStep array by reading csv file
    timeSteps = []
    with open(str(args.obs) + "-" + str(args.noradid) + ".csv") as csv_file:
         csv_reader = csv.reader(csv_file, delimiter=",")
         for row in csv_reader:
             timeSteps.append(int(row[0]))
    
    cube = []
    
    ### get tle
    hdu = fits.open(str(args.obs)+ str(args.band) + "-" + str(args.midName) + "-" + str(timeSteps[0]) + "h-" + str(0).zfill(4) + "-dirty.fits" )
    ref_utc = datetime.strptime(hdu[0].header["DATE-OBS"], '%Y-%m-%dT%H:%M:%S.%f')
    line1, line2, line3 = obtainTLE(args.noradid,ref_utc, args.obs)
    sat = getSat(line1, line2, line3)
    
    
    ## get rotation array
    print("calculating rotations...")
    rotation_array = []
    for t in timeSteps:
        hduH = fits.open(str(args.obs)+ str(args.band) + "-" + str(args.midName) + "-" + str(t) + "h-" + str(0).zfill(4) + "-dirty.fits" )
        utc = datetime.strptime(hduH[0].header["DATE-OBS"], '%Y-%m-%dT%H:%M:%S.%f')
        wcs = WCS(hduH[0].header, naxis=2)
        slope = getRotation(sat, utc, wcs)
        rotation_array.append(slope)
    print("done")
    
    for f in tqdm(range(args.channels)):
        global_data = []
        w = []
        for t, slope in zip(timeSteps,rotation_array):
            hduH = fits.open(str(args.obs)+ str(args.band) + "-" + str(args.midName) + "-" + str(t) + "h-" + str(f).zfill(4) + "-dirty.fits" )
            hduT = fits.open(str(args.obs)+ str(args.band) + "-" + str(args.midName) + "-" + str(t) + "t-" + str(f).zfill(4) + "-dirty.fits" )
            diff = hduH[0].data[0,0,:,:] - hduT[0].data[0,0,:,:] 
        
            ## the below logic is to avoid getting nans due to inverting noise = 0
            if np.std(diff) == 0:
                continue
            else:
                diff = rotate(diff, slope, order=5, reshape=False)
                global_data.append(diff)
                w.append(np.std(diff))

        if not w:
            cube.append(np.zeros(diff.shape))
        else:
            w = np.array(w)
            weights = 1/w
            try:
                stack = np.average(np.array(global_data), axis=0, weights=weights)
                cube.append(stack)
            except:
                cube.append(np.zeros(diff.shape))

    np.save("weightedRotated"+ str(args.noradid) + "-" + str(args.obs) + ".npy", cube)

    ## make images of all 6sigma events
    cube = np.array(cube)

    ### check if cube is full of nans, and if so exit(1)
    if np.all(np.isnan(cube)):
        print("cube full of zeros. terminating..")
        sys.exit(1)

    for f in range(cube.shape[0]):
        temp1 = np.copy(cube[f,:,:])
        temp2 = np.copy(cube[f,:,:])
        signal = np.nanmax(temp1)
        temp2[np.abs(temp2) > 3*np.std(temp2)] = 0
        temp2[np.abs(temp2) > 3*np.std(temp2)] = 0

        noise = np.std(temp2)
        snr = signal/noise

        if snr >= 6:
            plt.clf()
            plt.imshow(cube[f,:,:], origin="lower")
            plt.colorbar()
            plt.title("channel {}".format(f))
            plt.savefig("weightedEvent" + str(f).zfill(4) + ".png")
Example No. 14
def coaddShowCModel(prefix,
                    galId,
                    filter='HSC-I',
                    ref=False,
                    root=None,
                    noParent=False,
                    zeropoint=27.0,
                    mag0=24.5,
                    mag1=18.0,
                    fontSize=14,
                    figSize=14,
                    showSource=True,
                    verbose=True):
    """Visualize the cModel fitting results for given coadd image."""
    galId = str(galId).strip()
    # Get the coadd data, both image and catalog
    imgData, imgHead, catData = getCoaddData(prefix,
                                             galId,
                                             filter=filter,
                                             root=root,
                                             ref=ref)
    # Objects with useful cModel information
    catCModel = catData[(np.isfinite(catData['cmodel_flux']))
                        & (np.isfinite(catData['cmodel_exp_flux'])) &
                        (np.isfinite(catData['cmodel_dev_flux'])) &
                        (catData['cmodel_flux'] > 0.0) &
                        (catData['deblend_nchild'] == 0) &
                        (catData['classification_extendedness'] >= 0.5)]
    if verbose:
        print "### %d objects with useful cModel information" % len(catCModel)
    # Convert (RA, DEC) into (X, Y)
    wcs = WCS(imgHead)
    raCmod, decCmod = srcToRaDec(catCModel)
    xyCmod = wcs.wcs_world2pix((catCModel['coord'] * 180.0 / np.pi), 1)
    xCmod, yCmod = xyCmod[:, 0], xyCmod[:, 1]
    # Get cModelMagnitude
    magCmod = catFlux2Mag(catCModel, 'cmodel_flux', zeropoint=zeropoint)
    # Convert into color array
    colorCmod = toColorArr(magCmod, top=mag0, bottom=mag1)
    #
    ellipList = ['cmodel_exp_ellipse', 'cmodel_dev_ellipse', 'shape_sdss']
    ellipType = ['Exponential', 'de Vaucouleurs', 'SDSS Shape']
    for (ii, ellipName) in enumerate(ellipList):
        if verbose:
            print "### Start to work on %s model !" % ellipName
        ellipse = catCModel[ellipName]
        loc = os.path.join(root, galId, filter)
        pngFile = os.path.join(
            loc,
            prefix + '_' + galId + '_' + filter + '_' + ellipName + '.png')
        showCmodel(imgData,
                   xCmod,
                   yCmod,
                   ellipse,
                   colorCmod,
                   figSize=figSize,
                   fontSize=fontSize,
                   filter=filter,
                   ellipName=ellipType[ii],
                   showSource=showSource,
                   mag0=mag0,
                   mag1=mag1,
                   figName=pngFile)
Example No. 15
Q_tot = Q * (1 + sd_star / sds * mom2 / vdstar)**(-1)
Qtot_binned = np.nanmean(np.nanmean(Q_tot.reshape(int(960 / bin), bin,
                                                  int(960 / bin), bin),
                                    axis=-1),
                         axis=1)

SFR = fits.getdata(imageDir + 'NGC5257_33GHz_pbcor_regrid.fits')[0][0]
SFR_binned = np.nanmean(np.nanmean(SFR.reshape(int(960 / bin), bin,
                                               int(960 / bin), bin),
                                   axis=-1),
                        axis=1)
beammajor = 0.7
beamminor = 0.58
freq = 33
SFR_phy_binned = ssfr_radio(SFR_binned, beammajor, beamminor, freq, D)
wcs_sfr = WCS(
    fits.getheader(imageDir + 'NGC5257_33GHz_pbcor_regrid_smooth.fits'))
wcs_sfr = wcs_sfr.celestial
levels = [2 * 1.0e-5]

fig = plt.figure()
sc = plt.scatter(R_binned,
                 Qtot_binned,
                 c=mom0_binned,
                 marker='.',
                 cmap='brg_r')
cbar = plt.colorbar(sc)
cbar.set_label('12CO 2-1 intensity $Jy km s^{-1} beam^{-1}$', fontsize=20)
plt.xlabel('Radius (kpc)', fontsize=20)
plt.ylabel('Toomre factor Q', fontsize=20)
fig.tight_layout()
plt.savefig(picDir + 'NGC5257_Toomre_scatter.png')
Example No. 16
def offset_with_orientation(observation,
                            catalog,
                            wcsprm,
                            verbose=True,
                            fast=False,
                            report_global="",
                            INCREASE_FOV_FLAG=False,
                            silent=False):
    """Use simple_offset(...) but with trying 0,90,180,270 rotation.

    Parameters
    ----------
    observation : dataframe
        pandas dataframe with sources on the observation
    catalog : dataframe
        pandas dataframe with nearby sources from online catalogs with accurate astrometric information
    wcsprm
        Wcsprm file
    verbose : boolean
        Set to False to suppress output to the console
    fast : boolean
        If True, run with a subset of the sources to increase speed.


    Returns
    -------
    wcs, signal, report

    """
    observation = copy.copy(observation)
    N_SOURCES = observation.shape[0]
    if (fast):
        if (N_SOURCES > s.USE_N_SOURCES):
            N_SOURCES = s.USE_N_SOURCES
        observation = observation.nlargest(N_SOURCES, 'aperture_sum')
        if (INCREASE_FOV_FLAG):
            N_CATALOG = N_SOURCES * 12
        else:
            N_CATALOG = N_SOURCES * 4
        catalog = catalog.nsmallest(N_CATALOG, 'mag')
    if (verbose):
        #print("--------------------------------------")
        #print("offset_with_orientation, seaching for offset while considering reflections and 0,90,180,270 rotations")
        print(
            "offset_with_orientation, seaching for offset while considering 0,90,180,270 rotations"
        )
        if (fast):
            print("running in fast mode")
    rotations = [
        [[1, 0], [0, 1]],
        # Reflections are already checked via the scaling and general rotations,
        # but these are kept in case rot_scale is off.
        [[-1, 0], [0, -1]],
        [[-1, 0], [0, 1]],
        [[1, 0], [0, -1]],
        [[0, 1], [1, 0]],
        [[0, -1], [-1, 0]],
        [[0, -1], [1, 0]],
        [[0, 1], [-1, 0]],
    ]
    # rotations = [ [[1,0],[0,1]], [[-1,0],[0,-1]],
    #               [[0,-1],[1,0]], [[0,1],[-1,0]],
    #             ]
    wcsprm_global = copy.copy(wcsprm)
    results = []
    for rot in rotations:
        if (verbose):
            print("Trying rotation {}".format(rot))
            #print(report)
        wcsprm = rotate(copy.copy(wcsprm_global), rot)
        #wcs = WCS(...)
        report = report_global + "---- Report for rotation {} ---- \n".format(
            rot)
        wcsprm, signal, report = simple_offset(observation, catalog, wcsprm,
                                               report)
        results.append([copy.copy(wcsprm), signal, report])

    signals = [i[1] for i in results]
    median = np.median(signals)
    i = np.argmax(signals)
    wcsprm = results[i][0]
    signal = signals[i]
    #hist = results[i][3]
    report = results[i][2]
    report = report + "A total of {} sources from the fits file where used. \n".format(
        N_SOURCES)
    report = report + "The signal (#stars) is {} times higher than noise outlierers for other directions. (more than 2 would be nice, typical: 8 for PS)\n".format(
        signals[i] / median)

    if (verbose):
        print("We found the following world coordinates: ")
        print(WCS(wcsprm.to_header()))
        print("And here is the report:")
        print(report)
        print("-----------------------------")
    off = wcsprm.crpix - wcsprm_global.crpix
    if (not silent):
        print("Found offset {:.3g} in x direction and {:.3g} in y direction".
              format(off[0], off[1]))

    return wcsprm, signal, report
Example No. 17
    parser.add_argument("--passwd",
                        required=True,
                        help="Password for space-track.org")
    parser.add_argument("--debug",
                        default=False,
                        type=bool,
                        help="run scirpt in debug mode")
    args = parser.parse_args()

    global debug
    debug = args.debug

    global query
    query = st.SpaceTrackClient(args.user, args.passwd)

    ## get header info and make them global
    hdu = fits.open("6Sigma1FloodfillSigmaRFIBinaryMap-t" +
                    str(args.t1).zfill(4) + ".fits")

    global wcs, imgSize, startUTC, pixel_scale
    wcs = WCS(hdu[0].header, naxis=2)
    imgSize = hdu[0].header["NAXIS1"]
    pixel_scale = hdu[0].header["CDELT2"]
    startUTC = datetime.strptime(hdu[0].header['DATE-OBS'][:-2],
                                 '%Y-%m-%dT%H:%M:%S')

    if debug:
        eprint("running in debug mode")

    main(args.obs, args.t1, args.t2)
Example No. 18
    def getWCS(self, srcDir, imgName, ra0=-1000, dec0=-1000):

        starttime = datetime.now()

        os.system("rm -rf %s/*" % (self.tmpDir))

        imgpre = imgName.split(".")[0]
        oImgf = "%s/%s.fit" % (srcDir, imgpre)
        oImgfz = "%s/%s.fit.fz" % (srcDir, imgpre)
        if os.path.exists(oImgf):
            os.system("cp %s/%s.fit %s/%s" %
                      (srcDir, imgpre, self.tmpDir, self.objectImg))
        elif os.path.exists(oImgfz):
            os.system("cp %s/%s.fit.fz %s/%s.fz" %
                      (srcDir, imgpre, self.tmpDir, self.objectImg))
            os.system("%s %s/%s.fz" %
                      (self.funpackProgram, self.tmpDir, self.objectImg))
        else:
            self.log.warning("%s not exist" % (oImgf))
            return False

        fieldId, cra, cdec = self.tools.removeHeaderAndOverScan(
            self.tmpDir, self.objectImg)

        fpar = 'sex_diff.par'
        sexConf = [
            '-DETECT_MINAREA', '10', '-DETECT_THRESH', '5', '-ANALYSIS_THRESH',
            '5', '-CATALOG_TYPE', 'FITS_LDAC'
        ]
        tmplCat = self.tools.runSextractor(self.objectImg,
                                           self.tmpDir,
                                           self.tmpDir,
                                           fpar,
                                           sexConf,
                                           cmdStatus=0,
                                           outSuffix='_ldac.fit')
        self.tools.ldac2fits('%s/%s' % (self.tmpDir, tmplCat),
                             '%s/ti_cat.fit' % (self.tmpDir))

        if ra0 < 360 and ra0 > 0 and dec0 > -90 and dec0 < 90:
            cra, cdec = ra0, dec0
        runSuccess = self.tools.runWCS(self.tmpDir, 'ti_cat.fit', cra, cdec)

        wcsfile = 'ti_cat.wcs'
        if runSuccess:
            wcs = WCS('%s/%s' % (self.tmpDir, wcsfile))
            #ra_center, dec_center = wcs.all_pix2world(4096/2, 4136/2, 1) #4136, 4096
            ra_center, dec_center = wcs.all_pix2world(self.imgSize[1] / 2,
                                                      self.imgSize[0] / 2, 1)
            print(
                'read_ra_center:%.5f, read_dec_center:%.5f, real_ra_center:%.5f, real_dec_center:%.5f'
                % (cra, cdec, ra_center, dec_center))
        else:
            print('%s, get wcs error' % (imgName))
            ra_center, dec_center = 0, 0

        endtime = datetime.now()
        runTime = (endtime - starttime).seconds
        self.log.info("********** get WCS %s use %d seconds" %
                      (imgName, runTime))
        print("********** get WCS %s use %d seconds" % (imgName, runTime))

        return wcsfile, ra_center, dec_center
Example No. 19
    def setUp(self):
        # self.browser = webdriver.Firefox()

        super(AnalyserTest, self).setUp()

        self.username = '******'
        self.password = '******'
        self.email = '*****@*****.**'
        self.marge = User.objects.create_user(username=self.username,
                                              password=self.password,
                                              email=self.email)
        self.marge.first_name = 'Marge'
        self.marge.last_name = 'Simpson'
        self.marge.is_active = 1
        self.marge.save()

        null_wcs = WCS()
        params = {
            'sitecode': 'K93',
            'instrument': 'kb75',
            'filter': 'w',
            'filename': 'file1.fits',
            'exptime': 40.0,
            'midpoint': '2017-01-01 21:09:51',
            'frametype': Frame.BANZAI_RED_FRAMETYPE,
            'block': self.test_block,
            'frameid': 1,
            'wcs': null_wcs
        }
        self.frame1 = Frame.objects.create(**params)
        params = {
            'sitecode': 'K93',
            'instrument': 'kb75',
            'filter': 'w',
            'filename': 'file2.fits',
            'exptime': 40.0,
            'midpoint': '2017-01-01 21:20:00',
            'frametype': Frame.BANZAI_RED_FRAMETYPE,
            'block': self.test_block,
            'frameid': 2,
            'wcs': null_wcs
        }
        self.frame2 = Frame.objects.create(**params)
        params1 = {
            'body': self.body,
            'frame': self.frame1,
            'obs_ra': 10.1,
            'obs_dec': 10.2,
            'aperture_size': 1
        }
        self.source1 = SourceMeasurement.objects.create(**params1)
        params2 = {
            'body': self.body,
            'frame': self.frame2,
            'obs_ra': 10.15,
            'obs_dec': 10.25,
            'aperture_size': 1
        }
        self.source2 = SourceMeasurement.objects.create(**params2)

        self.test_block.num_observed = 1
        self.test_block.save()

        # Build Candidate --- WHY??? This cannot be the best way...
        self.dtypes =\
             {  'names' : ('det_number', 'frame_number', 'sext_number', 'jd_obs', 'ra', 'dec', 'x', 'y', 'mag', 'fwhm', 'elong', 'theta', 'rmserr', 'deltamu', 'area', 'score', 'velocity', 'sky_pos_angle', 'pixels_frame', 'streak_length'),
                'formats' : ('i4',       'i1',           'i4',          'f8',     'f8', 'f8', 'f4', 'f4', 'f4', 'f4',   'f4',    'f4',    'f4',     'f4',       'i4',   'f4',   'f4',       'f4',        'f4',           'f4')
             }

        self.dets_array = array([
            (1, 1, 3283, 2457444.656045, 10.924317, 39.27700, 2103.245,
             2043.026, 19.26, 12.970, 1.764, -60.4, 0.27, 1.39, 34, 1.10,
             0.497, 0.2, 9.0, 6.7),
            (1, 2, 0, 2457444.657980, 10.924298, 39.27793, 2103.468, 2043.025,
             0.00, 1.000, 1.000, 0.0, 0.27, 0.00, 0, 1.10, 0.497, 0.2, 9.0,
             6.7),
            (1, 3, 3409, 2457444.659923, 10.924271, 39.27887, 2104.491,
             2043.034, 19.20, 11.350, 1.373, -57.3, 0.27, 1.38, 52, 1.10,
             0.497, 0.2, 9.0, 6.7),
            (1, 4, 3176, 2457444.661883, 10.924257, 39.27990, 2104.191,
             2043.844, 19.01, 10.680, 1.163, -41.5, 0.27, 1.52, 52, 1.10,
             0.497, 0.2, 9.0, 6.7),
            (1, 5, 3241, 2457444.663875, 10.924237, 39.28087, 2104.365,
             2043.982, 19.17, 12.940, 1.089, -31.2, 0.27, 1.27, 55, 1.10,
             0.497, 0.2, 9.0, 6.7),
            (1, 6, 3319, 2457444.665812, 10.924220, 39.28172, 2104.357,
             2043.175, 18.82, 12.910, 1.254, -37.8, 0.27, 1.38, 69, 1.10,
             0.497, 0.2, 9.0, 6.7),
        ],
                                dtype=self.dtypes)
        self.dets_byte_array = self.dets_array.tobytes()
        params3 = {
            'block': self.test_block,
            'cand_id': 1,
            'score': 1.42,
            'avg_midpoint': datetime(2016, 2, 26, 3, 53, 7),
            'avg_x': 1024.0,
            'avg_y': 1042.3,
            'avg_ra': 123.42,
            'avg_dec': -42.3,
            'avg_mag': 20.7,
            'speed': 0.497,
            'sky_motion_pa': 90.4,
            'detections': self.dets_byte_array
        }
        self.candidate = Candidate.objects.create(**params3)
Example No. 20
def test():

    #toolPath = '/home/gwac/img_diff_xy/image_diff'
    toolPath = '/home/xy/Downloads/myresource/deep_data2/image_diff'
    tools = AstroTools(toolPath)

    dataDest0 = "/home/xy/gwac_diff_xy/data"
    logDest0 = "/home/xy/gwac_diff_xy/log"

    if not os.path.exists(dataDest0):
        os.system("mkdir -p %s" % (dataDest0))
    if not os.path.exists(logDest0):
        os.system("mkdir -p %s" % (logDest0))

    startProcess = False
    dayRun = 0
    nigRun = 0
    skyId = 0
    ffId = 0
    tfiles = []
    #srcPath00='/data1/G004_041_190124'
    srcPath00 = '/home/xy/Downloads/myresource/matchTest'
    dateStr = '190124'
    camName = 'G041'
    curSkyId = '123'

    dstDir = '%s/%s' % (dataDest0, dateStr)
    tdiff = BatchImageDiff(srcPath00, dstDir, tools, camName, curSkyId)

    #tpath1 = '/data2/G003_034_190211'
    #tpath2 = '/data2/G003_034_190227'
    tpath1 = '/home/xy/Downloads/myresource/matchTest'
    tpath2 = '/home/xy/Downloads/myresource/matchTest'
    fname1 = 'G034_mon_objt_190211T12192603.fit'
    fname2 = 'G044_mon_objt_190305T13070793.fit'

    ra0, dec0 = -1000, -1000
    wcsfile1, ra_center1, dec_center1 = tdiff.getWCS(tpath1, fname1, ra0, dec0)
    wcs1 = WCS('%s/%s' % (tdiff.tmpDir, wcsfile1))
    wcsfile2, ra_center2, dec_center2 = tdiff.getWCS(tpath2, fname2, ra0, dec0)
    wcs2 = WCS('%s/%s' % (tdiff.tmpDir, wcsfile2))

    os.system("cp %s/%s %s/%s" % (tpath1, fname1, tdiff.tmpDir, fname1))
    os.system("%s %s/%s" % (tdiff.funpackProgram, tdiff.tmpDir, fname1))
    os.system("cp %s/%s %s/%s" % (tpath2, fname2, tdiff.tmpDir, fname2))
    os.system("%s %s/%s" % (tdiff.funpackProgram, tdiff.tmpDir, fname2))

    fpar = 'sex_diff.par'
    #sexConf=['-DETECT_MINAREA','3','-DETECT_THRESH','2.5','-ANALYSIS_THRESH','2.5']
    sexConf = [
        '-DETECT_MINAREA', '10', '-DETECT_THRESH', '5', '-ANALYSIS_THRESH', '5'
    ]
    tcat1 = tools.runSextractor('G034_mon_objt_190211T12192603.fit',
                                tdiff.tmpDir, tdiff.tmpDir, fpar, sexConf)
    tcat2 = tools.runSextractor('G044_mon_objt_190305T13070793.fit',
                                tdiff.tmpDir, tdiff.tmpDir, fpar, sexConf)

    tdata2 = np.loadtxt("%s/%s" % (tdiff.tmpDir, tcat2))
    tXY = tdata2[:, 0:2]
    print(tXY[:3])
    tRaDec = wcs2.all_pix2world(tXY, 1)
    print(tRaDec[:3])
    tXY2 = wcs1.all_world2pix(tRaDec, 1)
    print(tXY2[:3])
    tdata2[:, 0:2] = tXY2
    saveName = "%s_trans.cat" % (fname2.split(".")[0])
    savePath = "%s/%s" % (tdiff.tmpDir, saveName)
    np.savetxt(savePath, tdata2, fmt='%.4f')

    mchFile, nmhFile, mchPair = tools.runCrossMatch(tdiff.tmpDir, saveName,
                                                    tcat1, 1)
    evaluatePos(tdiff.tmpDir, saveName, tcat1, mchPair)
    evaluatePos(tdiff.tmpDir, tcat2, tcat1, mchPair)
Example No. 21
def test_wcs_arithmetic():
    ccd_data = create_ccd_data()
    wcs = WCS(naxis=2)
    ccd_data.wcs = wcs
    result = ccd_data.multiply(1.0)
    nd_testing.assert_wcs_seem_equal(result.wcs, wcs)
Example No. 22
from astropy.io import fits
from astropy.wcs import WCS
from spectral_cube import SpectralCube
import matplotlib.pyplot as plt

fn = 'img_custom_CO_J1-0_LTE_jypxl_edgeon.fits'
hdu = fits.open(fn)[0]
hdu.header['CUNIT3'] = 'm/s'
w = WCS(hdu.header)
cube = SpectralCube(data=hdu.data.squeeze(), wcs=w.dropaxis(3))

m0 = cube.moment(order=0)
m1 = cube.moment(order=1)
m0.write('moment0_' + fn, overwrite=True)
m1.write('moment1_' + fn, overwrite=True)

fig, ax = plt.subplots(ncols=2,
                       subplot_kw={'projection': w.celestial},
                       figsize=(12, 6))

im0 = ax[0].imshow(m0.array, cmap='hot', origin='lower')
im1 = ax[1].imshow(m1.array, cmap='nipy_spectral', origin='lower')

cb0 = fig.colorbar(im0,
                   ax=ax[0],
                   orientation='horizontal',
                   format='%.2f',
                   pad=0.1)
cb1 = fig.colorbar(im1, ax=ax[1], orientation='horizontal', pad=0.1)

ax[0].set_title('Moment 0 - edgeon', fontsize=14)
Example No. 23
def stack_spectrogram_sequence(cube_sequence, memmap=True, reproject=False):
    """
    Given a sequence of IRIS rasters stack them into a single `ndcube.NDCube`.

    .. warning::

        This is intended to be used for plotting only, it will not preserve the
        flux in the image.

    Parameters
    ----------
    cube_sequence : `sunraster.spectrogram_sequence.RasterSequence`
        The input arrays to regrid.

    memmap : `bool`
        Use a temporary file to store the re-gridded data in rather than in memory.

    Returns
    -------
    `ndcube.NDCube`: A 4D cube with a new time dimension

    """
    if len(cube_sequence.data) == 1:
        raise ValueError("No point doing this to one raster")

    if memmap:
        if not isinstance(memmap, Path):
            memmap = tempfile.TemporaryFile()

    target_wcs = cube_sequence[0].wcs
    target_shape = cube_sequence[0].data.shape

    cube_shape = tuple([len(cube_sequence.data)] + list(target_shape))
    memmap = np.memmap(memmap, cube_sequence[0].data.dtype,
                       "w+", shape=cube_shape) if memmap else np.empty(cube_shape)

    times = [cube_sequence[0].extra_coords['time']['value'][0]]
    memmap[0] = cube_sequence[0].data
    for i, cube in enumerate(cube_sequence.data[1:]):
        if not reproject:
            memmap[i+1] = cube_sequence[i+1].data
        else:
            reproject_interp((cube.data, cube.wcs),
                             target_wcs, shape_out=target_shape,
                             hdu_in=0,
                             order=0,
                             return_footprint=False,
                             output_array=memmap[i+1])
        times.append(cube.extra_coords['time']['value'][0])

    times = Time(times)
    dts = times[1:] - times[:-1]

    if u.allclose(dts[0].to(u.s), dts.to(u.s), atol=0.5*u.s):
        dt = dts[0]
    else:
        raise ValueError("Can't handle tabular wcs")

    out_wcs = WCS(naxis=4)
    out_wcs.wcs.crpix = list(target_wcs.wcs.crpix) + [0]
    out_wcs.wcs.crval = list(target_wcs.wcs.crval) + [0]
    out_wcs.wcs.cdelt = list(target_wcs.wcs.cdelt) + [dt.to(u.s).value]
    out_wcs.wcs.ctype = list(target_wcs.wcs.ctype) + ['TIME']
    out_wcs.wcs.cunit = list(target_wcs.wcs.cunit) + ['s']

    pc = np.identity(4)
    pc[:3, :3] = target_wcs.wcs.pc

    out_wcs.wcs.pc = pc

    return NDCube(memmap, out_wcs)
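A hedged usage sketch: seq stands for a sunraster RasterSequence obtained elsewhere; stacking returns the 4D NDCube with the extra time axis built above.

# seq = ...  (a sunraster RasterSequence read from IRIS level-2 files)
# stacked = stack_spectrogram_sequence(seq, memmap=False, reproject=True)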
Example No. 24
def read_spice_l2_fits(filename, windows=None, memmap=True):
    """Read SPICE level 2 FITS file.

    Parameters
    ----------
    filename: `str`
        The name, including path, of the SPICE FITS file to read.

    windows: iterable of `str`
        The names of the windows to read.
        Default=None implies all windows read out.

    memmap: `bool`
        If True, the FITS file is read with memory mapping.

    Returns
    -------
    output: `ndcube.NDCollection` or `sunraster.SpectrogramCube`
        A collection of spectrogram cubes, one for each window.
        If only one window present or requested, a single spectrogram cube is returned.
    """
    window_cubes = []
    with fits.open(filename, memmap=memmap) as hdulist:
        # Retrieve window names from FITS file.
        if windows is None:
            windows = [
                hdu.header["EXTNAME"] for hdu in hdulist
                if hdu.header["EXTNAME"] != "VARIABLE_KEYWORDS"
            ]
        for i, hdu in enumerate(hdulist):
            if hdu.header["EXTNAME"] in windows:
                # Define metadata object.
                meta = SPICEMeta(
                    hdu.header,
                    comments=_convert_fits_comments_to_key_value_pairs(
                        hdu.header))
                # Rename WCS time axis to time.
                meta.update([("CTYPE4", "TIME")])
                new_header = copy.deepcopy(hdu.header)
                new_header["CTYPE4"] = "TIME"
                # Define WCS from new header
                wcs = WCS(new_header)
                # Define exposure times from metadata.
                exp_times = TimeDelta(np.repeat(meta.get("XPOSURE"),
                                                hdu.data.shape[-1]),
                                      format="sec")
                # Define data cube.
                data = hdu.data
                spectrogram = SpectrogramCube(
                    data=data,
                    wcs=wcs,
                    mask=np.isnan(data),
                    unit=u.adu,
                    extra_coords=[("exposure time", -1, exp_times)],
                    meta=meta,
                    instrument_axes=("raster scan", "spectral", "slit",
                                     "slit step"))
                window_cubes.append((meta.get("EXTNAME"), spectrogram))

    if len(windows) > 1:
        aligned_axes = np.where(
            np.asarray(spectrogram.world_axis_physical_types) != "em.wl")[0]
        aligned_axes = tuple([int(i) for i in aligned_axes])
        output = NDCollection(window_cubes, aligned_axes=aligned_axes)
    else:
        output = spectrogram

    return output
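A hedged usage sketch (the file name and window name are placeholders): read all windows and pick one cube from the returned collection.

# collection = read_spice_l2_fits("solo_L2_spice_example.fits")
# cube = collection["WINDOW_NAME"]  # indexing only needed when several windows are present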
Example No. 25
def test_rotation_matrix(map_data, hpc_coord):
    header = make_fitswcs_header(map_data,
                                 hpc_coord,
                                 rotation_matrix=np.array([[1, 0], [0, 1]]))
    wcs = WCS(header)
    np.testing.assert_allclose(wcs.wcs.pc, [[1, 0], [0, 1]], atol=1e-5)
Example No. 26
import numpy as np
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt

DRA = np.loadtxt('2MASS.GC.cat', usecols=(0, ))
DDEC = np.loadtxt('2MASS.GC.cat', usecols=(1, ))
#f = np.loadtxt('N2768_f.GC.dat', usecols=(1,))

fits_file = 'f.fits'
hdu = fits.open(fits_file)[0]
wcs = WCS(hdu.header)

coords = np.column_stack((DRA, DDEC))

ra = []
dec = []

pix = wcs.wcs_world2pix(coords, 1)

for i in range(0, len(pix)):
    ra.append(pix[i][0])
    dec.append(pix[i][1])

plt.imshow(hdu.data, origin='lower', cmap='gray', label='All GCs')
#plt.scatter(DRA, DDEC, c=f, cmap='Blues')
plt.plot(DRA, DDEC, 'bo')
#cb = plt.colorbar()
#cb.set_label('Probability Of Being in the Spheroid')
Example No. 27
            if retcode < 0:
                rootLogger.info("Child was terminated by signal" +
                                str(-retcode))
            else:
                rootLogger.info("Child returned" + str(retcode))
        except OSError as e:
            rootLogger.info("PSPhot Execution failed:" + str(e))

        # Fix output catalog coordinates
        # origin=0.5
        if os.path.exists(base + ".cmf"):
            cat = Table(fits.getdata(base + ".cmf", 1))
            cat2 = fits.getdata(base + ".cmf", 2)
            fhead["CTYPE1"] = ctype1
            fhead["CTYPE2"] = ctype2
            w = WCS(fhead)
            # There's an offset of 0.5 pixels, not sure why
            r, d = w.all_pix2world(cat["X_PSF"] + 0.5, cat["Y_PSF"] + 0.5, 1)
            cat['RA_PSF'] = r
            cat['DEC_PSF'] = d
            os.remove(base + ".cmf")
            cat.write(base + ".cmf", format='fits')
            fits.append(base + ".cmf", cat2)

        # 3d) Load the catalog (and logfile) and write final output file
        # Move the file to final location
        if os.path.exists(base + ".cmf"):
            outcatfile = dir + instcode + "/" + night + "/" + base + "/" + base + "_" + str(
                ccdnum) + ".psphot.cat.fits"
            outmdlfile = dir + instcode + "/" + night + "/" + base + "/" + base + "_" + str(
                ccdnum) + ".psphot.mdl.fits"
Example No. 28
def re_write_fits(in_file,
                  out_file,
                  no_neg=False,
                  smooth_arcmin=0.0,
                  rm_median=False,
                  flag_dist_arcmin=0.0,
                  normalize=False):
    """ 
    PURPOSE: This function extracts a map from a FITS file and modifies it by
    applying smoothing, baseline removal, flagging of negative values,
    and masking. The new image is then written to a new file.

    INPUT: - in_file (string): input file full name
           - out_file (string): (output file full name)
           - no_neg (bool): set negative values to zero (default is no)
           - smooth_arcmin (float): Gaussian smoothing FWHM, in arcmin, to apply
           - rm_median (bool): remove the median of the map (default is no)
           - flag_dist_arcmin (float): set to zero pixels beyond this limit, 
             from the center
           - normalize (bool): set this keyword to normalize the map to 1 (i.e.
             setting the integral sum(map)*reso^2 to 1)

    OUTPUT: - The new map is written to a new file
            - image (2d numpy array): the new map
            - header : the corresponding map header
    """

    #---------- Data extraction
    data = fits.open(in_file)[0]
    image = data.data
    wcs = WCS(data.header)
    reso_x = abs(wcs.wcs.cdelt[0])
    reso_y = abs(wcs.wcs.cdelt[1])
    Npixx = image.shape[0]
    Npixy = image.shape[1]
    fov_x = Npixx * reso_x
    fov_y = Npixy * reso_y

    #---------- Data modification
    if smooth_arcmin >= 0:
        sigma_sm = smooth_arcmin / 60.0 / np.array(
            [reso_x, reso_x]) / (2 * np.sqrt(2 * np.log(2)))
        image = ndimage.gaussian_filter(image, sigma=sigma_sm)

    if rm_median == True:
        image = image - np.median(image)

    if no_neg == True:
        image[image < 0] = 0.0

    if flag_dist_arcmin > 0:
        if Npixx / 2.0 != int(Npixx / 2.0):
            axisx = np.arange(-(Npixx - 1.0) // 2.0,
                              ((Npixx - 1.0) // 2.0) + 1.0)
        else:
            axisx = np.arange(-(Npixx - 1.0) // 2.0,
                              ((Npixx - 1.0) // 2.0) + 1.0) + 0.5
        if Npixy / 2.0 != int(Npixy / 2.0):
            axisy = np.arange(-(Npixy - 1.0) // 2.0,
                              ((Npixy - 1.0) // 2.0) + 1.0)
        else:
            axisy = np.arange(-(Npixy - 1.0) // 2.0,
                              ((Npixy - 1.0) // 2.0) + 1.0) + 0.5
        coord_y, coord_x = np.meshgrid(axisx, axisy, indexing='ij')
        radius_map = np.sqrt((coord_x * reso_x)**2 + (coord_y * reso_y)**2)
        image[radius_map > flag_dist_arcmin / 60.0] = 0.0

    if normalize == True:
        norm = np.sum(image) * reso_x * reso_y * (np.pi / 180.0)**2
        image = image / norm

    #---------- WCS construction
    w = WCS(naxis=2)
    w.wcs.crpix = wcs.wcs.crpix
    w.wcs.cdelt = wcs.wcs.cdelt
    w.wcs.crval = wcs.wcs.crval
    w.wcs.latpole = wcs.wcs.latpole
    w.wcs.lonpole = wcs.wcs.lonpole
    if (wcs.wcs.ctype[0] == "RA--TAN") or (wcs.wcs.ctype[1] == "DEC-TAN"):
        w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    else:
        w.wcs.ctype = wcs.wcs.ctype

    #---------- Write FITS
    header = w.to_header()
    hdu = fits.PrimaryHDU(header=header)
    hdu.data = image
    hdu.writeto(out_file, overwrite=True)

    return image, header
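A possible call (file names are placeholders): smooth by 1 arcmin, clip negatives, and normalize so that sum(map) * reso^2 equals 1, as the docstring describes.

image, header = re_write_fits('input_map.fits', 'output_map.fits',
                              no_neg=True, smooth_arcmin=1.0, normalize=True)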
Example No. 29
def test_rotation_angle(map_data, hpc_coord):
    header = make_fitswcs_header(map_data,
                                 hpc_coord,
                                 rotation_angle=90 * u.deg)
    wcs = WCS(header)
    np.testing.assert_allclose(wcs.wcs.pc, [[0, -1], [1, 0]], atol=1e-5)
Example No. 30
def make_postcards(fns, outdir, width=104, height=148, wstep=None, hstep=None):
    # Make sure that the output directory exists
    os.makedirs(outdir, exist_ok=True)

    # We'll assume that the filenames can be sorted like this (it is true for
    # the ETE-6 test data).
    fns = list(sorted(fns))
    total_ffis = len(fns)
    # Save the middle header as the primary header
    middle_fn = fns[total_ffis // 2]
    data, primary_header = fitsio.read(middle_fn, 1, header=True)

    # Add the eleanor info to the header
    primary_header.add_record("COMMENT   ***********************")
    primary_header.add_record("COMMENT   *    eleanor INFO     *")
    primary_header.add_record("COMMENT   ***********************")
    primary_header.add_record(dict(name='AUTHOR', value='Adina D. Feinstein'))
    primary_header.add_record(dict(name='VERSION', value=__version__))
    primary_header.add_record(
        dict(name='GITHUB', value='https://github.com/afeinstein20/eleanor'))
    primary_header.add_record(
        dict(name='CREATED',
             value=strftime('%Y-%m-%d'),
             comment='eleanor file creation date (YYYY-MM-DD)'))

    # Build the WCS for this middle exposure
    primary_wcs = WCS(primary_header)

    # Is this a raw frame? If so, there won't be any error information
    is_raw = primary_header["IMAGTYPE"].strip() == "uncal"

    # Set the output filename format
    sector = os.path.split(middle_fn)[-1].split("-")[
        1]  # Scrapes sector from the filename

    info = (sector, primary_header["CAMERA"], primary_header["CCD"],
            primary_header["IMAGTYPE"].strip())
    info_str = '{0}-{1}-{2}-{3}'.format(info[0], info[1], info[2], info[3])

    outfn_fmt = "hlsp_eleanor_tess_ffi_postcard-{0}-{{0:04d}}-{{1:04d}}.fits".format(
        info_str)
    outfn_fmt = os.path.join(outdir, outfn_fmt).format

    # Build the pointing model
    f = ffi(sector=int(info[0][1:]), camera=info[1], chip=info[2])
    f.local_paths = fns
    f.sort_by_date()
    pm = f.pointing_model_per_cadence()

    # We want to shift the WCS for each postcard so let's store the default
    # reference pixel
    crpix_h = float(primary_header["CRPIX1"])
    crpix_w = float(primary_header["CRPIX2"])

    # Work out the dimensions of the problem
    dtype = data.dtype
    shape = data.shape
    total_width, total_height = shape
    width = int(width)
    height = int(height)
    if wstep is None:
        wstep = width - 50
    if hstep is None:
        hstep = height - 50
    wstep = int(wstep)
    hstep = int(hstep)

    # Make a grid of postcard origin coordinates
    ws = np.arange(0, 2049, wstep)  #total_width - width + wstep + 1, wstep)
    hs = np.arange(44, 2093, hstep)  #total_height - height + hstep + 1, hstep)

    # Compute the total numbers for progress bars
    num_times = len(fns)
    total_num_postcards = len(ws) * len(hs)

    # Allocate the memory for the stacked FFIs
    all_ffis = np.empty((total_width, total_height, len(fns)),
                        dtype=dtype,
                        order="F")
    if not is_raw:
        all_errs = np.empty((total_width, total_height, len(fns)),
                            dtype=dtype,
                            order="F")

    s = int(sector[1::])
    metadata_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                'metadata', 's{0:04d}'.format(s))
    ffiindex = np.loadtxt(
        os.path.join(metadata_dir, 'cadences_s{0:04d}.txt'.format(s)))
    sc_fn = os.path.join(metadata_dir, 'target_s{0:04d}.fits'.format(s))

    # We'll have the same primary HDU for each postcard - this will store the
    # time dependent header info
    primary_cols = [
        "TSTART", "TSTOP", "BARYCORR", "DATE-OBS", "DATE-END", "BKG",
        "QUALITY", "FFIINDEX"
    ]
    primary_dtype = [
        np.float32, np.float32, np.float32, "O", "O", np.float32, np.int64,
        np.int64
    ]
    primary_data = np.empty(len(fns), list(zip(primary_cols, primary_dtype)))

    # Make sure that the sector, camera, chip, and dimensions are the
    # same for all the files
    for i, name in tqdm.tqdm(enumerate(fns), total=num_times):
        data, hdr = fitsio.read(name, 1, header=True)

        # FIXME: when `sector` is added to the header, we should check
        # it too!  -- still not added (dfm)
        new_shape = (hdr["NAXIS2"], hdr["NAXIS1"])
        new_info = (sector, hdr["CAMERA"], hdr["CCD"], hdr["IMAGTYPE"].strip())
        if shape != new_shape or new_info != info:
            raise ValueError(
                "the header info for '{0}' does not match".format(name))
        info = new_info

        # Save the info for the primary HDU
        for k, dtype in zip(primary_cols[:-3], primary_dtype[:-3]):
            if dtype == "O":
                primary_data[k][i] = hdr[k].encode("ascii")
            else:
                primary_data[k][i] = hdr[k]

        # Save the data
        all_ffis[:, :, i] = data

        if not is_raw:
            all_errs[:, :, i] = fitsio.read(name, 2)

    wmax, hmax = 2048, 2092

    quality = np.empty(len(fns))

    # Loop over postcards
    post_names = []
    with tqdm.tqdm(total=total_num_postcards) as bar:
        for i, h in enumerate(hs):
            for j, w in enumerate(ws):
                dw = width  #min(width, total_width - w)
                dh = height  #min(height, total_height - h)

                hdr = fitsio.FITSHDR(primary_header)

                if np.shape(all_ffis[w:w + dw, h:h + dh, :]) != (width, height,
                                                                 total_ffis):
                    if w + dw > wmax:
                        w = wmax - dw
                    if h + dh > hmax:
                        h = hmax - dh

                # Shift the reference pixel for the WCS to
                # account for the postcard location
                hdr.add_record(
                    dict(name="CRPIX1",
                         value=crpix_h - h,
                         comment="X reference pixel"))
                hdr.add_record(
                    dict(name="CRPIX2",
                         value=crpix_w - w,
                         comment="Y reference pixel"))

                # Shift TSTART and TSTOP in the header to the first TSTART and
                # last TSTOP from the FFI headers
                tstart = primary_data['TSTART'][0]
                tstop = primary_data['TSTOP'][-1]
                hdr.add_record(
                    dict(name='TSTART',
                         value=tstart,
                         comment='observation start time in BTJD'))
                hdr.add_record(
                    dict(name='TSTOP',
                         value=tstop,
                         comment='observation stop time in BTJD'))

                # Do the same for DATE-OBS and DATE-END as was done for TSTART and TSTOP
                hdr.add_record(
                    dict(name='DATE-OBS',
                         value=primary_data['DATE-OBS'][0].decode("ascii"),
                         comment='TSTART as UTC calendar date'))
                hdr.add_record(
                    dict(name='DATE-END',
                         value=primary_data['DATE-END'][-1].decode("ascii"),
                         comment='TSTOP as UTC calendar date'))

                # Add the observation start and stop times in MJD
                tstart = Time(tstart + 2457000, format='jd').mjd
                tstop = Time(tstop + 2457000, format='jd').mjd
                hdr.add_record(
                    dict(name='MJD-BEG',
                         value=tstart,
                         comment='observation start time in MJD'))
                hdr.add_record(
                    dict(name='MJD-END',
                         value=tstop,
                         comment='observation end time in MJD'))

                # Save the postcard coordinates in the header
                hdr.add_record(
                    dict(name="POSTPIX1",
                         value=h,
                         comment="origin of postcard axis 1"))
                hdr.add_record(
                    dict(name="POSTPIX2",
                         value=w,
                         comment="origin of postcard axis 2"))

                xcen = h + 0.5 * dh
                ycen = w + 0.5 * dw

                outfn = outfn_fmt(int(xcen), int(ycen))
                post_names.append(outfn)

                rd = primary_wcs.all_pix2world(xcen, ycen, 1)
                hdr.add_record(
                    dict(name="CEN_X",
                         value=xcen,
                         comment="central X pixel of postcard in FFI"))
                hdr.add_record(
                    dict(name="CEN_Y",
                         value=ycen,
                         comment="central Y pixel of postcard in FFI"))
                hdr.add_record(
                    dict(name="CEN_RA",
                         value=float(rd[0]),
                         comment="RA of central pixel"))
                hdr.add_record(
                    dict(name="CEN_DEC",
                         value=float(rd[1]),
                         comment="Dec of central pixel"))
                hdr.add_record(
                    dict(name="POST_H",
                         value=float(height),
                         comment="Height of postcard in pixels"))
                hdr.add_record(
                    dict(name="POST_W",
                         value=float(width),
                         comment="Width of postcard in pixels"))
                hdr.add_record(
                    dict(name="SECTOR",
                         value=sector[1::],
                         comment="TESS sector"))

                pixel_data = all_ffis[w:w + dw, h:h + dh, :] + 0.0

                bkg_array = []

                # For each cadence: estimate and subtract the background, then
                # fill the BKG, FFIINDEX, and QUALITY columns of primary_data
                for k in range(len(fns)):
                    b = bkg(pixel_data[:, :, k])
                    primary_data[k][len(primary_cols) - 3] = b
                    pixel_data[:, :, k] -= b

                    primary_data[k][len(primary_cols) - 1] = ffiindex[k]

                    if i == 0 and j == 0 and k == 0:
                        print("Getting quality flags")
                        quality_array = set_quality_flags(
                            primary_data['TSTART'] - primary_data['BARYCORR'],
                            primary_data['TSTOP'] - primary_data['BARYCORR'],
                            sc_fn,
                            sector[1::],
                            new_info[1],
                            new_info[2],
                            pm=pm)
                    primary_data[k][len(primary_cols) - 2] = quality_array[k]

                # Save the primary HDU
                fitsio.write(outfn, primary_data, header=hdr, clobber=True)

                # Save the image data
                fitsio.write(outfn, pixel_data)

                if not is_raw:
                    fitsio.write(outfn, all_errs[w:w + dw, h:h + dh, :])

                bar.update()
    return np.array(post_names)
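# A minimal usage sketch (the paths below are hypothetical; it assumes a local
# set of TESS FFI files and that eleanor's per-sector metadata is available):
import glob

ffi_files = glob.glob("/data/tess/sector-0001/*-ffic.fits")
postcard_files = make_postcards(ffi_files, "/data/tess/sector-0001/postcards")
print(len(postcard_files), "postcards written")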