Code example #1
def convert_region_pix2wcs(region_dict, header):

    # Create the WCS object once; it is the same for every region
    wcs_header = WCS(header)

    for region in region_dict:
        df = region_dict[region]

        coords_pix = np.array([df['xpix'], df['ypix']]).T

        # convert pixel coordinates to world coordinates (ra/dec)
        coords_wcs = wcs_header.all_pix2world(coords_pix, 0)

        # write data to dataframe
        df['ra'], df['dec'] = coords_wcs[:,0], coords_wcs[:,1]

    return region_dict
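A minimal sketch of the underlying conversion, for trying `all_pix2world` without a region dict; the TAN-projection header values below are invented for illustration.

import numpy as np
from astropy.wcs import WCS

# Build a small synthetic WCS instead of reading a FITS header
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50.0, 50.0]          # reference pixel (invented)
wcs.wcs.crval = [150.0, 2.0]          # reference sky position in degrees (invented)
wcs.wcs.cdelt = [-2.8e-4, 2.8e-4]     # degrees per pixel (invented)

coords_pix = np.array([[10.0, 20.0], [30.0, 40.0]])
coords_wcs = wcs.all_pix2world(coords_pix, 0)   # origin=0 -> numpy convention
print(coords_wcs)                               # columns are (ra, dec) in degrees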
Code example #2
def test_sip2pv():
    """
    Test conversion of SIP to PV keywords; ensure that both provide the same ra/dec <--> x/y transforms.
    """

    sip_header = fits.Header.fromtextfile(os.path.join(dir_name, 'data/IRAC_3.6um_sip.txt'))
    control_header = sip_header.copy()
    naxis1 = sip_header['NAXIS1']
    naxis2 = sip_header['NAXIS2']
    x = np.linspace(1, naxis1, 10)
    y = np.linspace(1, naxis2, 10)
    xx, yy = np.meshgrid(x, y)
    pixargs = np.vstack([xx.reshape(-1), yy.reshape(-1)]).T

    sip_to_pv(sip_header)

    wsip = WCS(sip_header)
    wtpv = WCS(control_header)

    world1 = wsip.all_pix2world(pixargs, 1)
    world2 = wtpv.all_pix2world(pixargs, 1)

    npt.assert_equal(world1, world2)

    pix1 = wsip.all_world2pix(world1, 1)
    pix2 = wtpv.all_world2pix(world2, 1)
    npt.assert_almost_equal(pix1, pixargs, 4)
    npt.assert_almost_equal(pix2, pixargs, 4)
Code example #3
def test_pv2sip():
    """
    Test conversion of PV to SIP keywords; check that the world2pix transform
    round-trips and is equal for PV and SIP keywords.
    """

    pv_header = fits.Header.fromtextfile(os.path.join(dir_name, 'data/PTF_r_chip01_tpv.txt'))
    control_header = pv_header.copy()
    naxis1 = pv_header['NAXIS1']
    naxis2 = pv_header['NAXIS2']
    x = np.linspace(1, naxis1, 10)
    y = np.linspace(1, naxis2, 10)
    xx, yy = np.meshgrid(x, y)
    pixargs = np.vstack([xx.reshape(-1), yy.reshape(-1)]).T

    pv_to_sip(pv_header)

    wsip = WCS(pv_header)
    wtpv = WCS(control_header)

    world1 = wsip.all_pix2world(pixargs, 1)
    world2 = wtpv.all_pix2world(pixargs, 1)

    npt.assert_equal(world1, world2)

    pix1 = wsip.all_world2pix(world1, 1)
    pix2 = wtpv.all_world2pix(world2, 1)

    npt.assert_almost_equal(pix1, pixargs, 4)
    npt.assert_almost_equal(pix2, pixargs, 4)
Code example #4
def get_fits_limits(fits_image):
    """

    :param fits_image: The chosen image.
    :return: a dict with ra/dec limits above_ra, below_ra,
    above_dec_, below_dec
    """

    data, header = fits.getdata(fits_image, header=True)
    w = WCS(fits_image)

    above_x, above_y = header['NAXIS1'], header['NAXIS2']
    above_ra, above_dec = w.all_pix2world(above_x, above_y, 0)

    below_ra, below_dec = w.all_pix2world(0, 0, 0)


    ra = [above_ra, below_ra]
    dec = [above_dec, below_dec]

    limits = {
        'below_ra': float(min(ra)),
        'above_ra': float(max(ra)),
        'below_dec': float(min(dec)),
        'above_dec': float(max(dec))
    }

    return limits
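A hedged usage sketch; 'field.fits' is a placeholder path to any image with a celestial WCS.

limits = get_fits_limits('field.fits')
print(limits['below_ra'], limits['above_ra'])    # RA range of the frame
print(limits['below_dec'], limits['above_dec'])  # Dec range of the frame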
Code example #5
def wcs2xy(data, header, ydata, rotateWCS=False):
    assert len(data.shape)==1
    wcs = WCS(header)
    print(wcs)
    pixel_x = np.arange(data.size)
    pixel_y = np.zeros((data.size,))
    if wcs.naxis==2:
        axisToUse = 1
        mask = (wcs.wcs.crval==1.0)|(np.abs(np.diag(wcs.wcs.cd))==1.0)
        print(mask)
        if mask.sum()==1:
            if mask[1]:
                axisToUse = 1-axisToUse

        if rotateWCS: axisToUse = 1-axisToUse
        if axisToUse: # for vertical spectrum
            pixel = np.array([pixel_y, pixel_x], dtype=int).T
            xdata = wcs.all_pix2world(pixel, 1)[:,1].flatten()
        else:
            pixel = np.array([pixel_x, pixel_y], dtype=int).T
            xdata = wcs.all_pix2world(pixel, 1)[:,0].flatten()
    else:
        xdata = wcs.all_pix2world(pixel_x.reshape((data.size,1)),1).flatten()
    inds = np.argsort(xdata)
    xdata = xdata[inds]
    ydata = ydata[inds]
    print("xdata range:", xdata[0], xdata[-1])
    return xdata, ydata
Code example #6
def get_fits_limits(fits_image):
    """

    @param logger:
    @param fits_image: fits image

    @return limits: a dict with ra/dec limits above_ra, below_ra,
                    above_dec_, below_dec
    """
    # logger.info('getting limits of {} image'.format(fits_image))

    data, header = fits.getdata(fits_image, header=True)
    w = WCS(fits_image)

    above_x, above_y = header['NAXIS1'], header['NAXIS2']
    above_ra, above_dec = w.all_pix2world(above_x, above_y, 0)

    below_ra, below_dec = w.all_pix2world(0, 0, 0)

    # depending on the image orientation, the corner tagged "above" can have
    # the smaller coordinate value, so take min/max explicitly
    ra = [above_ra, below_ra]
    dec = [above_dec, below_dec]

    limits = {
        'below_ra': float(min(ra)),
        'above_ra': float(max(ra)),
        'below_dec': float(min(dec)),
        'above_dec': float(max(dec))
    }

    return limits
Code example #7
def pixel_to_world(fitsfile, x, y, ch=0):
    w = WCS(fitsfile)
    if w.wcs.naxis == 3:
        return w.all_pix2world(x, y, ch, 1)
    elif w.wcs.naxis == 2:
        return w.all_pix2world(x, y, 1)
    else:
        raise ValueError('Unexpected number of WCS axes in the header.')
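A hypothetical call; 'image.fits' is a placeholder path. Note the function uses origin=1 (the FITS convention), so (1, 1) is the centre of the first pixel; for a 3-D cube it also returns the spectral coordinate of channel `ch`.

ra, dec = pixel_to_world('image.fits', 512.0, 512.0)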
Code example #8
def RunSearch(output):
    hdulist = fits.open(args.Image, memmap=True)
    data = 1.0 * hdulist[0].data[0][0]
    pb = fits.open(args.PBImage, memmap=True)[0].data[0][0]
    data = np.where(pb < args.PBLimit, np.nan, data)
    data = np.where(np.isnan(pb), np.nan, data)

    bmaj, bmin, factor, bpa, pix_size = GetBeam(args.Image)
    w = WCS(args.Image)
    data_aux = 1.0 * data
    sigma = GetBestSigma(data)
    print('RMS:', round(sigma * 1e6, 1), ' microJy/beam')
    data = 1.0 * data_aux
    data_aux = 0
    sigma, sn_lim, detections, fo = GetDetections(data, sigma)

    fo = 1.0 * hdulist[0].data[0][0]
    fo = np.nan_to_num(fo)

    if len(detections[0]) == 0:
        print('no detections', detections)
        return output
    flux_point_source, centro_x, centro_y = GetSources(detections, fo,
                                                       args.MinSN, 'Candidate',
                                                       sigma)
    flux_response = pb
    for i in range(len(centro_x)):
        print('Measuring properties for candidate:', i + 1, '/', len(centro_x))
        model_name = 'Candidate_ID' + str(i + 1) + '.pdf'
        aux = 'ID' + str(i + 1).zfill(2) + '\t'
        dec = w.all_pix2world(centro_x[i], centro_y[i], 0, 0, 0)[w.wcs.lat]
        ra = w.all_pix2world(centro_x[i], centro_y[i], 0, 0, 0)[w.wcs.lng]
        c = SkyCoord(ra=ra * u.degree, dec=dec * u.degree,
                     frame='icrs').to_string('hmsdms', sep=':', precision=3)
        ra, dec = c.split()
        aux = aux + ra + '\t' + dec + '\t' + str(round(sn_lim, 1)) + '\t'
        aux = aux + '%.1f\t%.1f\t%.1f\t' % (flux_point_source[i] * 1e6, sigma *
                                            1e6, flux_point_source[i] / sigma)

        aux_fits = GetFits(centro_x[i], centro_y[i], factor, fo, sigma,
                           model_name.replace('.pdf', '_3p.pdf'), bmaj, bmin,
                           bpa, True, pix_size)
        aux_fits2 = GetFits(centro_x[i], centro_y[i], factor, fo, sigma,
                            model_name.replace('.pdf', '_6p.pdf'), bmaj, bmin,
                            bpa, False, pix_size)

        aux = aux + '%.1f\t%.1f\t%.1f\t' % (aux_fits[2] * 1e6, aux_fits[0] *
                                            1e6, aux_fits[1] * 1e6)
        aux = aux + '%.1f\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f\t%.2f\n' % (
            aux_fits2[2] * 1e6, aux_fits2[0] * 1e6, aux_fits2[1] * 1e6,
            aux_fits[3], aux_fits2[3], aux_fits[4], aux_fits[5], aux_fits[6],
            aux_fits2[4], aux_fits2[5], aux_fits2[6], flux_response[int(
                centro_y[i])][int(centro_x[i])])
        output.write(aux)
        output.flush()

    return output
Code example #9
    def get_ax_lims(self):
        xlim = self.ax.get_xlim()
        ylim = self.ax.get_ylim()
        wcs = WCS(self.h1)
        # sky coordinates of the four corners of the current view
        bl = wcs.all_pix2world([[xlim[0], ylim[0]]], 1)
        tr = wcs.all_pix2world([[xlim[1], ylim[1]]], 1)
        br = wcs.all_pix2world([[xlim[1], ylim[0]]], 1)
        tl = wcs.all_pix2world([[xlim[0], ylim[1]]], 1)

        return xlim, ylim, (bl, br, tl, tr)
Code example #10
File: test_wcs.py  Project: rubab1/STScI-STIPS
def test_astro_image_wcs():
    this_dir = os.path.dirname(os.path.abspath(__file__))
    fits_path = os.path.join(this_dir, "wcs_test.fits")

    # Test 90 degree rotation
    ai = AstroImage.initFromFits(fits_path)
    default_wcs = ai._getWcs()  # WCS(ai.hdu)
    default_data = ai.hdu.data[:]

    ai.rotate(90)
    rotated_wcs = ai._getWcs()  # WCS(ai.hdu)
    rotated_data = ai.hdu.data[:]

    arr_size_x = ai.hdu.shape[0]
    arr_size_y = ai.hdu.shape[1]

    for x in range(arr_size_x):
        for y in range(arr_size_y):
            wcs_location = default_wcs.all_pix2world([[y, x]], 0)
            default_value = default_data[y][x]

            # use fresh names so the loop variables x and y are not shadowed
            ry, rx = np.round(rotated_wcs.all_world2pix(wcs_location,
                                                        0)).astype(int)[0]
            assert (0 <= rx < arr_size_x)
            assert (0 <= ry < arr_size_y)
            rotated_value = rotated_data[ry][rx]

            if not np.isclose(default_value, rotated_value):
                print("Failed Values: ", default_value, rotated_value)
            assert np.isclose(default_value, rotated_value)

    # Test rescale
    ai = AstroImage.initFromFits(fits_path)
    default_wcs = WCS(ai.hdu)

    ai.rescale((ai.scale[0] * 2, ai.scale[0] * 2))
    rescale_wcs = WCS(ai.hdu)

    default_world = default_wcs.all_pix2world([[0, 0]], 0)
    rescale_world = rescale_wcs.all_pix2world([[0, 0]], 0)

    assert np.allclose(default_world, rescale_world)

    # Test crop
    ai = AstroImage.initFromFits(fits_path)
    default_wcs = WCS(ai.hdu)

    ai.crop(0, 0, 10, 10)
    crop_wcs = WCS(ai.hdu)

    default_world = default_wcs.all_pix2world([[5, 5]], 0)
    crop_world = crop_wcs.all_pix2world([[5, 5]], 0)

    assert np.allclose(default_world, crop_world)
Code example #11
def fix_wcs(obj, cat, sr, header=None, use_header_wcs=False, maxmatch=1, order=6, fix=True):
    '''Get a refined WCS solution based on cross-matching of objects with catalogue on the sphere.
    Uses external 'fit-wcs' binary from Astrometry.Net suite'''

    if header is not None:
        width,height = header['NAXIS1'],header['NAXIS2']

        if use_header_wcs:
            wcs = WCS(header)

            if wcs:
                obj['ra'],obj['dec'] = wcs.all_pix2world(obj['x'], obj['y'], 0)
    else:
        width,height = int(np.max(obj['x'])),int(np.max(obj['y']))

    h = htm.HTM(10)
    oidx,cidx,dist = h.match(obj['ra'], obj['dec'], cat['ra'], cat['dec'], sr, maxmatch=maxmatch)

    dir = tempfile.mkdtemp(prefix='astrometry')
    wcs = None
    binname = None

    for path in ['.', '/usr/local', '/opt/local']:
        if os.path.isfile(posixpath.join(path, 'astrometry', 'bin', 'fit-wcs')):
            binname = posixpath.join(path, 'astrometry', 'bin', 'fit-wcs')
            break

    if binname:
        columns = [fits.Column(name='FIELD_X', format='1D', array=obj['x'][oidx] + 1),
                   fits.Column(name='FIELD_Y', format='1D', array=obj['y'][oidx] + 1),
                   fits.Column(name='INDEX_RA', format='1D', array=cat['ra'][cidx]),
                   fits.Column(name='INDEX_DEC', format='1D', array=cat['dec'][cidx])]
        tbhdu = fits.BinTableHDU.from_columns(columns)
        filename = posixpath.join(dir, 'list.fits')
        wcsname = posixpath.join(dir, 'list.wcs')

        tbhdu.writeto(filename, overwrite=True)

        os.system("%s -c %s -o %s -W %d -H %d -C -s %d" % (binname, filename, wcsname, width, height, order))

        if os.path.isfile(wcsname):
            header = fits.getheader(wcsname)
            wcs = WCS(header)

            if fix and wcs:
                obj['ra'],obj['dec'] = wcs.all_pix2world(obj['x'], obj['y'], 0)

    else:
        print("Astrometry.Net binary not found")

    shutil.rmtree(dir)

    return wcs
Code example #12
def extract_values(hdulist):
    image = hdulist[0].data
    header = hdulist[0].header
    w = WCS(header)

    fields = [
        'TELESCOPE', 'SBID', 'PROJECT', 'OBJECT', 'BMAJ', 'BMIN', 'BUNIT',
        'BTYPE', 'TMIN', 'TMAX'
    ]

    values = {}

    for field in fields:
        if field in header:
            values[field] = header[field]

    if 'RESTFRQ' in header:
        values['RESTFRQ'] = header['RESTFRQ']
    elif 'RESTFREQ' in header:
        values['RESTFRQ'] = header['RESTFREQ']

    # Now work with the wcs axes to get spatial and spectral data
    # w.wcs.print_contents()
    corners = []
    ra_size = header['NAXIS1']
    dec_size = header['NAXIS2']
    if header['NAXIS'] == 4:
        samples = [[0, 0, 0, 0], [0, dec_size - 1, 0, 0],
                   [ra_size - 1, dec_size - 1, 0, 0], [ra_size - 1, 0, 0, 0]]
        centre_point = [int(ra_size / 2), int(dec_size / 2), 0, 0]
    elif header['NAXIS'] == 3:
        samples = [[0, 0, 0], [0, dec_size - 1, 0],
                   [ra_size - 1, dec_size - 1, 0], [ra_size - 1, 0, 0]]
        centre_point = [int(ra_size / 2), int(dec_size / 2), 0]
    else:
        samples = [[0, 0], [0, dec_size - 1], [ra_size - 1, dec_size - 1],
                   [ra_size - 1, 0]]
        centre_point = [int(ra_size / 2), int(dec_size / 2)]
    for sample in samples:
        point = w.all_pix2world([sample], 0)[0]
        corners.append([point[0], point[1]])

    values['CORNERS'] = corners
    centre = w.all_pix2world([centre_point], 0)[0]
    values['RA'] = centre[0]
    values['DEC'] = centre[1]

    return values
Code example #13
File: hstphot.py  Project: srodney/hstphot
def xy2radec(imfile_or_hdr, x, y, ext=0):
    """ Convert the given x,y pixel position into
    ra,dec sky coordinates (in decimal degrees) for the given image.

    NOTE : this program assumes the input position follows the fits convention,
    with the center of the lower left pixel at (1,1).  The numpy/scipy
    convention sets the center of the lower left pixel at (0,0).

    :param imfile_or_hdr: image filename or astropy.io.fits Header object
    """
    from astropy.io import fits
    from astropy.wcs import WCS

    if isinstance(imfile_or_hdr, str):
        header = fits.getheader(imfile_or_hdr, ext=ext)
    elif isinstance(imfile_or_hdr, fits.Header):
        header = imfile_or_hdr
    else:
        print("WARNING: could not convert x,y to ra,dec for %s" %
              str(imfile_or_hdr))
        return None, None  # no header available, so no WCS can be built
    # NOTE: an alternate WCS construction (passing fobj) may be necessary
    # for ACS files
    wcs = WCS(header=header)
    ra, dec = wcs.all_pix2world(x, y, 1)
    return ra, dec
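A hypothetical call illustrating the 1-based FITS convention the docstring describes; 'image.fits' is a placeholder path.

ra, dec = xy2radec('image.fits', 1.0, 1.0)   # sky position of the first pixel's centre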
Code example #14
File: sources.py  Project: panoptes/PIAA
def _lookup_via_photutils(fits_file, wcs=None, *args, **kwargs):
    from photutils import DAOStarFinder
    data = fits.getdata(fits_file) - 2048  # Camera bias
    mean, median, std = sigma_clipped_stats(data)

    fwhm = kwargs.get('fwhm', 3.0)
    threshold = kwargs.get('threshold', 3.0)

    daofind = DAOStarFinder(fwhm=fwhm, threshold=threshold * std)
    sources = daofind(data - median).to_pandas()

    sources.rename(columns={
        'xcentroid': 'x',
        'ycentroid': 'y',
    }, inplace=True)

    if wcs is None:
        header = fits_utils.getheader(fits_file)
        wcs = WCS(header)

    coords = wcs.all_pix2world(sources['x'], sources['y'], 1)

    sources['ra'] = coords[0]
    sources['dec'] = coords[1]

    return sources
Code example #15
def getImgCenter(tpath, imgName, x, y):

    tools = AstroTools('/home/xy/Downloads/myresource/deep_data2/image_diff')
    fieldId, ra, dec = tools.getRaDec(tpath, imgName)
    fpar = 'sex_diff.par'
    sexConf = [
        '-DETECT_MINAREA', '10', '-DETECT_THRESH', '5', '-ANALYSIS_THRESH',
        '5', '-CATALOG_TYPE', 'FITS_LDAC'
    ]
    tmplCat, isSuccess = tools.runSextractor(imgName,
                                             tpath,
                                             tpath,
                                             fpar,
                                             sexConf,
                                             outSuffix='_ldac.fit')
    if not isSuccess:
        print("getDiffTemplate runSextractor failure2")
        return isSuccess, 0, 0

    tools.ldac2fits('%s/%s' % (tpath, tmplCat), '%s/ti_cat.fit' % (tpath))

    runSuccess = tools.runWCS(tpath, 'ti_cat.fit', ra, dec)

    if runSuccess:
        wcs = WCS('%s/ti_cat.wcs' % (tpath))
        ra, dec = wcs.all_pix2world(x, y, 1)

    return runSuccess, ra, dec
Code example #16
File: LinePlotter.py  Project: jigonzal/LinePlotter
def PlotSPW2(ff, x, y, data, factor):
    # print 'plotting:',ff
    hdulist = fits.open(ff, memmap=True)
    w = WCS(hdulist[0].header)
    header = hdulist[0].header
    XX = []
    YY = []
    rms = []
    for i in range(len(data)):
        XX.append(i)
        YY.append(sum(data[i][y, x]))
        rms.append(np.std(data[i][data[i] != 0.0]))
    aux = numpy.array([
        x[0] * numpy.ones(len(XX)), y[0] * numpy.ones(len(XX)), XX,
        numpy.zeros(len(XX))
    ])
    aux = numpy.transpose(aux)
    aux = w.all_pix2world(aux, 0)
    aux = numpy.transpose(aux)
    XX = aux[2] / 1e9
    YY = numpy.array(YY) * 1000.0 * factor
    rms = np.array(rms) * 1000.0 * np.sqrt(len(y)) * np.sqrt(factor)
    # print len(y),1.0/factor
    rms[np.isnan(rms)] = 1.0
    return XX, YY, aux, rms
Code example #17
class Mosaic:
    '''
    Class for opening an X-ray image and turning it into a binary mask.
    '''
    def __init__(self, filnam):
        self.struct =np.array([[0,0,1,0,0],
                               [0,1,1,1,0],
                               [0,0,1,0,0]]) #kernel for dilation
        self.hdul = pf.open(filnam)
        self.hdu = self.hdul[0]  # reuse the open HDU list instead of opening the file twice
        self.wcs = WCS(self.hdu.header)
        self.dil = nd.binary_dilation(self.hdu.data,iterations=10,structure=self.struct)
        #converting all nonzero elements to 1s
        self.nonzero = np.where(self.hdu.data !=0)
        self.bin_img = np.copy(self.hdu.data)
        self.bin_img[self.nonzero[0],self.nonzero[1]] = 1
        self.edges = feature.canny(self.dil,sigma=2)
        self.dil = nd.binary_erosion(self.dil,iterations=10)
        self.ny,self.nx = self.hdu.data.shape
        self.x = np.arange(self.nx)
        self.y = np.arange(self.ny)
        self.header = self.hdu.header
        self.X,self.Y = np.meshgrid(self.x,self.y)
        self.ra, self.dec = self.wcs.all_pix2world(self.X,self.Y,0)
        self.ra_cent = np.mean(self.ra) #self.header['RA_OBJ']
        self.dec_cent = np.mean(self.dec) #self.header['DEC_OBJ']
        self.coords = np.dstack((self.ra,self.dec))
        self.hdul.close() #closing necessary for freeing memory; safe now that all data is read
    def get_intpixdist(self):
        return np.sqrt( ( (self.ra[0][0]-self.ra[0][1])*np.cos(np.radians((self.dec[0][0]+self.dec[0][1])/2)) )**2 
                       +(self.dec[0][0]-self.dec[0][1])**2)
Code example #18
File: preprocess.py  Project: dhomeier/specview
def read_image(image, flux_unit=None, dispersion_unit=None, **kwargs):
    """Read 1D image

    Parameters
    ----------
    image: FITS Image HDU

    Returns
    -------
    SpectrumData

    Notes
    -----
    Assumes ONLY 1D and that the WCS has the dispersion
    definition. If not, it's just pixels.
    """
    if len(image.data.shape) > 1:
        raise RuntimeError('Attempting to read an image with more than one '
                           'dimension.')
    wcs = WCS(image.header)
    spectrum = SpectrumData()
    unit = flux_unit if flux_unit else DEFAULT_FLUX_UNIT
    spectrum.set_y(image.data, unit=unit)
    unit = wcs.wcs.cunit[0] if not dispersion_unit else dispersion_unit
    spectrum.set_x(wcs.all_pix2world(range(image.data.shape[0]), 1)[0],
                   unit=unit)

    return spectrum
Code example #19
    def test_curved_headers_are_all_one_pixel_apart(self, curved_trace):
        curved_trace["y1"] *= 1.1
        curved_trace["y2"] *= 1.2
        spt = SpectralTrace(curved_trace)
        pixel_size = 0.015
        hdrs = spt.get_curve_headers(pixel_size)
        dx = np.diff([hdr["CRVAL1D"] for hdr in hdrs])
        dy = np.diff([hdr["CRVAL2D"] for hdr in hdrs])
        dr = (dx**2 + dy**2)**0.5
        assert np.all(dr <= 1)

        # !!! PLOT this again to see issues
        if PLOTS:
            # orig world coords
            for row in curved_trace:
                x = [row["x0"], row["x2"]]
                y = [row["y0"], row["y2"]]
                len_mm = (np.diff(x)**2 + np.diff(y)**2)**0.5
                plt.plot(x, y, "k")
                plt.plot(x[0], y[0], "ko")
                plt.text(x[0], y[0], len_mm)

            # pixel coords
            for hdr in hdrs[::86]:
                xp = [0, hdr["NAXIS1"]]
                yp = [0, hdr["NAXIS2"]]
                wcs = WCS(hdr, key="D")
                # world coords
                xw, yw = wcs.all_pix2world(xp, yp, 1)
                plt.plot(xw, yw, "r")
                plt.plot(hdr["CRVAL1D"], hdr["CRVAL2D"], "ro")
                len_mm = (np.diff(xw)**2 + np.diff(yw)**2)**0.5
                plt.text(hdr["CRVAL1D"], hdr["CRVAL2D"], len_mm, color="red")
            plt.show()
Code example #21
File: prune.py  Project: AstroLudwig/E0102
def Prune(data, hdr):
    # Constants
    c_dec = -72.03125
    c_ra = 16.00875
    d = 58  #kpc
    arcs = 22  #"

    w = WCS(hdr)
    # For each pixel we need to know its ra/dec coordinates
    a, b = np.shape(data)
    row = np.arange(0, a)
    col = np.arange(0, b)
    row, col = np.meshgrid(row, col)
    row = row.flatten()
    col = col.flatten()

    all_ra, all_dec = w.all_pix2world(col, row, 1)
    # Numbers here are from Karin's paper.
    c1 = SkyCoord(c_ra * u.deg,
                  c_dec * u.deg,
                  distance=d * u.kpc,
                  frame='icrs')
    c2 = SkyCoord(all_ra * u.deg,
                  all_dec * u.deg,
                  distance=d * u.kpc,
                  frame='icrs')

    sep = c1.separation_3d(c2)
    radius = d * u.kpc * arcs / 206265

    look = np.where(sep > radius)
    data[row[look], col[look]] = np.nan

    return data
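The `radius` line uses the small-angle approximation: a physical size equals distance times the angle in radians, and 206265 is the number of arcseconds per radian. A quick standalone check with the same numbers, using astropy units:

import astropy.units as u

d = 58 * u.kpc            # same distance as in Prune() above
arcs = 22                 # angular radius in arcseconds
radius = d * arcs / 206265
print(radius.to(u.pc))    # roughly 6.2 pc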
Code example #22
File: myPYTHON.py  Project: SHAOqzyan/MWISPdbscan
	def getSpectraByLB(data,dataHeader,l,b):
		"""
		Parameters: data, dataHeader,l,b
		
		This function is used to get a voxel value from a data cube

		v, km/s
		the unit of returned veloicyt is kms

		return spectral,velocities
		"""
		wcs = WCS(dataHeader)
		xindex,yindex=wcs.all_world2pix(l,b,0,0)[0:2]
		xindex=int(round(xindex))
		yindex=int(round(yindex))
 
		
		## the computed yindex and xindex can exceed the boundary of the cube
 
		if yindex> data.shape[1]-1 or xindex>data.shape[2]-1:
			return None, None
		spectral=data[:,yindex,xindex]
		velocityIndex= range(data.shape[0])
		
		velocities=wcs.all_pix2world(0, 0,velocityIndex,0)[2]/1000.
 
		return spectral,velocities
Code example #23
File: wcs_conversions.py  Project: jakevdp/spheredb
def test_SIP_distortion(filename):
    hdulist = fits.open(filename)
    wcs = WCS(hdulist[1].header)

    # sanity check: foc -> pix inverts pix -> foc
    Xpix = [[1, 1]]
    Xfoc = wcs.sip_pix2foc(Xpix, 0)
    print(wcs.sip_foc2pix(Xfoc, 0))

    Xpix = 100 * np.random.random((40, 2))

    print("Conversion without SIP")
    Xworld = wcs.wcs_pix2world(Xpix, 0)
    Xpix2 = wcs.wcs_world2pix(Xworld, 0)
    print(" - match:", np.allclose(Xpix, Xpix2))

    print("SIP by hand")
    Xworld = wcs.all_pix2world(Xpix, 0)
    tmp = wcs.sip_pix2foc(Xpix, 0)
    Xworld2 = wcs.wcs_pix2world(tmp, 0)
    print(" - match:", np.allclose(Xworld, Xworld2))

    print("Conversion with SIP")
    tmp1 = wcs.sip_pix2foc(Xpix, 0)
    Xworld = wcs.wcs_pix2world(tmp1, 0)
    tmp2 = wcs.wcs_world2pix(Xworld, 0)
    Xpix2 = wcs.sip_foc2pix(tmp2, 0)
    print(" - match:", np.allclose(tmp1, tmp2))
    print(" - match:", np.allclose(Xpix, Xpix2))
    print(Xpix - Xpix2)
Code example #24
File: prune.py  Project: AstroLudwig/E0102
def SelectRegionalData(data, hdr, origin_ra, origin_dec, radius):
    # Constants
    w = WCS(hdr)
    c_ra = origin_ra
    c_dec = origin_dec
    d = 58  #kpc
    arcs = radius
    data = np.copy(data)

    # For each pixel we need to know its ra/dec coordinates
    a, b = np.shape(data)
    row = np.arange(0, a)
    col = np.arange(0, b)
    row, col = np.meshgrid(row, col)
    row = row.flatten()
    col = col.flatten()

    all_ra, all_dec = w.all_pix2world(col, row, 1)

    c1 = SkyCoord(c_ra * u.deg,
                  c_dec * u.deg,
                  distance=d * u.kpc,
                  frame='icrs')
    c2 = SkyCoord(all_ra * u.deg,
                  all_dec * u.deg,
                  distance=d * u.kpc,
                  frame='icrs')

    sep = c1.separation_3d(c2)
    radius = d * u.kpc * arcs / 206265

    look = np.where(sep > radius)
    data[row[look], col[look]] = np.nan

    return data
Code example #25
def fourCorners(fname, ext=1):
    """
    Compute the four corners in celestial coordinates

    Parameters:
    -----------
    fname (string): path to FITS image
    ext (int): extension with WCS keywords and image (default 1)

    Returns:
    --------
    corners: 4x2 numpy array of the four corners in (ra, dec) pairs
    """
    import astropy.io.fits as fits
    from astropy.wcs import WCS
    import numpy as np
    from scipy.spatial import ConvexHull
    from scipy.optimize import minimize
    from shapely.geometry import MultiPoint
    # minimum_bounding_rectangle() and fun() are assumed module-level helpers
    hdu = fits.open(fname)
    w = WCS(hdu[ext].header)
    imdata = hdu[ext].data  # use the requested extension, not a hardcoded 1
    grid = np.indices(imdata.shape)
    coords = np.vstack([grid[1][np.isfinite(imdata)],grid[0][np.isfinite(imdata)]]).T
    rect = minimum_bounding_rectangle(coords)
    #
    # Use the Scipy hull vertices because shapely's MultiPoint and convex_hull are so slow
    hull_points = coords[ConvexHull(coords).vertices]
    points = MultiPoint(hull_points)
    hull = points.convex_hull
    # From trial-and-error, the L-BFGS-B method works the best by far
    res = minimize(lambda x: fun(x, hull), rect.flat, method='L-BFGS-B',
               options={'ftol': 1e-4, 'disp': False, 'eps': 0.1})
    corners = w.all_pix2world(res.x.reshape(4,2),0)
    return corners
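A hypothetical usage sketch; 'image.fits' is a placeholder path whose extension 1 carries the WCS and image.

corners = fourCorners('image.fits', ext=1)
print(corners)   # 4x2 array of (ra, dec) pairs in degrees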
Code example #26
File: rusalt.py  Project: saurabhwjha/rusalt
def split1d(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d????.fits')
    if len(fs) == 0:
        print "WARNING: No extracted spectra to split."
        iraf.cd('..')
        return

    for f in fs:
        hdu = pyfits.open(f.replace('x1d', 'fix'))
        chipgaps = get_chipgaps(hdu)
        # Throw away the first pixel as it is almost always bad
        chipedges = [[1, chipgaps[0][0]], [chipgaps[0][1] + 1, chipgaps[1][0]],
                     [chipgaps[1][1] + 1, chipgaps[2][0]]]

        w = WCS(f)
        # Copy each of the chips out separately. Note that iraf is 1-indexed
        # unlike python so we add 1
        for i in range(3):
            # get the wavelengths that correspond to each chip
            lam, _apnum, _bandnum = w.all_pix2world(chipedges[i], 0, 0, 0)
            iraf.scopy(f, f[:-5] + 'c%i' % (i + 1), w1=lam[0], w2=lam[1],
                       format='multispec', rebin='no',clobber='yes')
        hdu.close()
    iraf.cd('..')
Code example #27
class CCD(Box):
    def __init__(self, index, hdu, valid):
        corners_x = [hdu.header['COR{}RA1'.format(i)] for i in [1, 2, 4, 3]]
        corners_y = [hdu.header['COR{}DEC1'.format(i)] for i in [1, 2, 4, 3]]
        super(CCD, self).__init__(corners_x, corners_y)
        self.index = index
        self.wcs = None
        self.hdu = hdu
        self.valid = valid

    @lazy_property
    def image(self):
        return self.hdu.data

    @lazy_property
    def header(self):
        return self.hdu.header

    def plot(self, vmin=0, vmax=100, cmap='gray', origin='lower'):
        fig, ax = plt.subplots()
        ax.set_title(f'CCD: {self.index}')
        ax.imshow(self.image.T, vmin=vmin, vmax=vmax, cmap=cmap, origin=origin)
        return fig, ax

    def pix_to_world(self, pix_x, pix_y):
        if self.wcs is None:
            self.wcs = WCS(self.header)
        return self.wcs.all_pix2world(np.array([pix_x, pix_y]).T, 1)

    def world_to_pix(self, ra, dec):
        if self.wcs is None:
            self.wcs = WCS(self.header)
        return self.wcs.all_world2pix(np.array([ra, dec]).T, 1)
Code example #28
def get_ra_dec_wcs(file_path, x, y):
    """Calculate the right ascension and declination from an image.

    Parameters
    ----------
    file_path : str
        Path to the image.

    x : float
        The x coordinate of the PSF.

    y : float
        The y coordinate of the PSF.

    Returns
    -------
    ra : float
        Right ascension.

    dec : float
        Declination.
    """

    hdu = fits.open(file_path)
    wcss = WCS(hdu[1].header, hdu)

    ra, dec = wcss.all_pix2world(x, y, 1)

    return ra, dec
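A hypothetical call; 'image.fits' is a placeholder path with the WCS in extension 1, and origin=1 means (x, y) follow the FITS convention.

ra, dec = get_ra_dec_wcs('image.fits', 100.5, 200.5)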
Code example #30
    def __fill_coords(self):
        """
        Calculates RA and DEC coordinates from X and Y and stores in catalogue.
        This step is necessary if the RA, DEC coordinates are needed after a run
        of imcore.

        Currently this is a convenience function if one needs to work with the
        catalogue generated by imcore elsewhere. It is not used in the fitting
        procedure.
        """
    
        logger.info('filling RA & Dec coordinates')
    
        # Open image and catalogue
        img = fits.open(self.image_name)
        cat = fits.open(self.image_name.replace('.fit', '_cat.fit'), 
                                                           mode='update')
    
        # Use the header on the image to compute coordinates and update catalogue
        for i in range(len(img)-1):
            w = WCS(img[i+1].header)
            x = cat[i+1].data.field('x_coordinate')
            y = cat[i+1].data.field('y_coordinate')
            a, d = w.all_pix2world(x, y, 1)
            cat[i+1].data['ra'] = a
            cat[i+1].data['dec'] = d
            cat.flush()
        
        cat.close()
        img.close()
Code example #31
    def estimate_wcs_accuracies(self):
        """
        Estimates the accuracies of the WCS transformation
        :return:
        """
        wcs = WCS(self.path)
        ra, dec = wcs.all_pix2world(self.comb['xcentroid'],
                                    self.comb['ycentroid'], 1)
        self.comb['ra_2'] = ra
        self.comb['dec_2'] = dec
        dra = self.comb['ra_1'] - self.comb['ra_2']
        dra *= 3600
        ddec = self.comb['dec_1'] - self.comb['dec_2']
        ddec *= 3600

        print_coordinate_differences(self.comb, 'ra_1', 'ra_2', factor=3600.)
        print_coordinate_differences(self.comb, 'dec_1', 'dec_2', factor=3600.)

        self.comb['dra'] = dra
        self.comb['ddec'] = ddec
        fit, cov = np.polyfit(self.comb['dec_2'],
                              self.comb['ddec'],
                              1,
                              cov=True)
        print(fit, np.sqrt(np.diag(cov)))
Code example #32
    def test_gets_headers_from_real_file(self):
        slit_hdr = ho._long_micado_slit_header()
        # slit_hdr = ho._short_micado_slit_header()
        wave_min = 0.8
        wave_max = 2.5
        spt = SpectralTraceList(
            filename="TRACE_SCI_15arcsec.fits",
            x_colname="x0",
            y_colname="y0",
            s_colname="s0",
        )

        params = {"wave_min": wave_min, "wave_max": wave_max,
                  "pixel_scale": 0.004, "plate_scale": 0.266666667}
        hdrs = spt.get_fov_headers(slit_hdr, **params)
        assert isinstance(spt, SpectralTraceList)

        print(len(hdrs))

        if PLOTS:
            spt.plot(wave_min, wave_max)

            # pixel coords
            for hdr in hdrs[::300]:
                xp = [0, hdr["NAXIS1"], hdr["NAXIS1"], 0]
                yp = [0, 0, hdr["NAXIS2"], hdr["NAXIS2"]]
                wcs = WCS(hdr, key="D")
                # world coords
                xw, yw = wcs.all_pix2world(xp, yp, 1)
                plt.plot(xw, yw, alpha=0.2)
            plt.show()
Code example #34
def aspcapStar_loader(file_obj, **kwargs):
    """
    Loader for APOGEE aspcapStar files.

    Parameters
    ----------
    file_obj: str or file-like
        FITS file name or object (provided from name by Astropy I/O Registry).

    Returns
    -------
    data: Spectrum1D
        The spectrum that is represented by the data in this table.
    """

    with read_fileobj_or_hdulist(file_obj, **kwargs) as hdulist:
        header = hdulist[0].header
        meta = {'header': header}
        wcs = WCS(hdulist[1].header)

        data = hdulist[1].data  # spectrum in the first extension
        unit = def_unit('arbitrary units')

        uncertainty = StdDevUncertainty(hdulist[2].data)

    # dispersion from the WCS but convert out of logspace
    dispersion = 10**wcs.all_pix2world(np.arange(data.shape[0]), 0)[0]
    dispersion_unit = Unit('Angstrom')

    return Spectrum1D(data=data * unit,
                      uncertainty=uncertainty,
                      spectral_axis=dispersion * dispersion_unit,
                      meta=meta,
                      wcs=wcs)
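The `10**` step reflects the APOGEE convention of storing log10(wavelength) in the spectral WCS. A minimal sketch of that relation with invented header values:

import numpy as np
from astropy.wcs import WCS

w = WCS(naxis=1)
w.wcs.crpix = [1.0]
w.wcs.crval = [4.179]   # log10(wavelength/Angstrom) at the reference pixel (invented)
w.wcs.cdelt = [6e-6]    # log10 step per pixel (invented)

pix = np.arange(5)
loglam = w.all_pix2world(pix, 0)[0]
wavelength = 10**loglam          # back to linear Angstroms
print(wavelength)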
Code example #35
def make_ratio_map(fitsimage, ra0, dec0, stride=16000000, outname=None):
    """Make a map of ratio of dOmega."""

    hdu = fits.open(fitsimage)
    try:
        projection = check_projection(hdu[0].header)
    except ValueError:
        logger.warning("dOmega ratios only valid for SIN and ZEA projections.")
        raise
    else:

        shape = np.squeeze(hdu[0].data).shape
        arr = np.full(shape, np.nan)

        w = WCS(hdu[0].header).celestial

        indices = np.indices(shape)
        y = indices[0].flatten()
        x = indices[1].flatten()
        n = len(x)

        for i in range(0, n, stride):

            r, d = w.all_pix2world(x[i:i+stride], y[i:i+stride], 0)
            factors = dOmega(r, d, ra0, dec0)

            arr[y[i:i+stride], x[i:i+stride]] = 1. / factors

    if outname is None:
        # return hdu instead of writing file - avoid unnecessary file creation
        hdu[0].data = arr
        return hdu
    else:
        fits.writeto(outname, arr, strip_wcsaxes(hdu[0].header), overwrite=True)
Code example #36
File: Diffusion_Vis.py  Project: AstroLudwig/E0102
    def Prune(data,hdr):
        # Numerical Values from Sandstrom 2009
        c_dec = -72.03125; c_ra = 16.00875
        d = 61 #kpc
        arcs = 22

        w = WCS(hdr)
        # For each pixel we need to know its ra/dec coordinates
        a,b = np.shape(data)
        row = np.arange(0,a); col = np.arange(0,b)
        row,col=np.meshgrid(row,col)
        row=row.flatten(); col=col.flatten()

        all_ra, all_dec = w.all_pix2world(col,row,1)
        c1 = SkyCoord(c_ra*u.deg, c_dec*u.deg, distance=d*u.kpc, frame='icrs')
        c2 = SkyCoord(all_ra*u.deg, all_dec*u.deg, distance=d*u.kpc, frame='icrs')

        sep = c1.separation_3d(c2)
        radius = d*u.kpc*arcs/206265

        look =np.where(sep > radius)

        data[row[look],col[look]] = 0

        return data
Code example #37
def test_adding_markers_as_world_recovers_with_get_markers():
    """
    Make sure that our internal conversion from world to pixel
    coordinates doesn't mess anything up.
    """
    npix_side = 100
    fake_image = np.random.randn(npix_side, npix_side)
    wcs = WCS(naxis=2)
    wcs.wcs.crpix = (fake_image.shape[0] / 2, fake_image.shape[1] / 2)
    wcs.wcs.ctype = ('RA---TAN', 'DEC--TAN')
    wcs.wcs.crval = (314.275419158, 31.6662781301)
    wcs.wcs.pc = [[0.000153051015113, -3.20700931602e-05],
                  [3.20704370872e-05, 0.000153072382405]]
    fake_ccd = CCDData(data=fake_image, wcs=wcs, unit='adu')
    iw = ImageWidget(pixel_coords_offset=0)
    iw.load_nddata(fake_ccd)
    # Get me 100 positions please, not right at the edge
    marker_locs = np.random.randint(10,
                                    high=npix_side - 10,
                                    size=(100, 2))
    marks_pix = Table(data=marker_locs, names=['x', 'y'])
    marks_world = wcs.all_pix2world(marker_locs, 0)
    marks_coords = SkyCoord(marks_world, unit='degree')
    mark_coord_table = Table(data=[marks_coords], names=['coord'])
    iw.add_markers(mark_coord_table, use_skycoord=True)
    result = iw.get_markers()
    # Check the x, y positions as long as we are testing things...
    np.testing.assert_allclose(result['x'], marks_pix['x'])
    np.testing.assert_allclose(result['y'], marks_pix['y'])
    np.testing.assert_allclose(result['coord'].ra.deg,
                               mark_coord_table['coord'].ra.deg)
    np.testing.assert_allclose(result['coord'].dec.deg,
                               mark_coord_table['coord'].dec.deg)
Code example #38
File: astrometry.py  Project: ebachelet/banzai
def add_ra_dec_to_catalog(image):
    image_wcs = WCS(image.header)
    ras, decs = image_wcs.all_pix2world(image.catalog['x'], image.catalog['y'], 1)
    image.catalog['ra'] = ras
    image.catalog['dec'] = decs
    image.catalog['ra'].unit = 'degrees'
    image.catalog['dec'].unit = 'degrees'
    image.catalog['ra'].description = 'Right Ascension'
    image.catalog['dec'].description = 'Declination'
Code example #39
File: rusalt.py  Project: saurabhwjha/rusalt
def spectoascii(fname, asciiname, ap=0):
    hdu = pyfits.open(fname)
    w = WCS(fname)
    # get the wavelengths of the pixels
    npix = hdu[0].data.shape[2]
    lam = w.all_pix2world(np.linspace(0, npix - 1, npix), 0, 0, 0)[0]
    spec = hdu[0].data[0, ap]
    specerr = hdu[0].data[3, ap]
    np.savetxt(asciiname, np.array([lam, spec, specerr]).transpose())
    hdu.close()
Code example #40
def _create_wcs_from_offsets(adinput, adref, center_of_rotation=None):
    """
    This function uses the POFFSET, QOFFSET, and PA header keywords to create
    a new WCS for an image. Its primary role is for GNIRS. For ease, it works
    out the (RA,DEC) of the centre of rotation in the reference image and
    determines where in the input image this is.

    Parameters
    ----------
    adinput: AstroData
        The input image whose WCS needs to be rewritten
    adref: AstroData
        The reference image with a trustworthy WCS
    center_of_rotation: 2-tuple
        Location of rotation center (x, y)
    """
    log = logutils.get_logger(__name__)
    if len(adinput) != len(adref):
        log.warning("Number of extensions in input files are different. "
                    "Cannot correct WCS.")
        return adinput

    log.stdinfo("Updating WCS of {} based on {}".format(adinput.filename,
                                                        adref.filename))
    try:
        xdiff = adref.detector_x_offset() - adinput.detector_x_offset()
        ydiff = adref.detector_y_offset() - adinput.detector_y_offset()
        pa1 = adref.phu['PA']
        pa2 = adinput.phu['PA']
    except (KeyError, TypeError):  # TypeError if offset is None
        log.warning("Cannot obtain necessary offsets from headers "
                    "so no change will be made")
        return adinput

    # We expect mosaicked inputs but there's no reason why this couldn't
    # work for all extensions in an image
    for extin, extref in zip(adinput, adref):
        # Will need to have some sort of LUT here eventually. But for now...
        if center_of_rotation is None:
            center_of_rotation = (630.0, 520.0) if 'GNIRS' in adref.tags \
                else tuple(0.5*x for x in extref.data.shape[::-1])

        wcsref = WCS(extref.hdr)
        ra0, dec0 = wcsref.all_pix2world(center_of_rotation[0],
                                         center_of_rotation[1], 1)
        extin.hdr['CRVAL1'] = float(ra0)
        extin.hdr['CRVAL2'] = float(dec0)
        extin.hdr['CRPIX1'] = center_of_rotation[0] - xdiff
        extin.hdr['CRPIX2'] = center_of_rotation[1] - ydiff
        cd = models.Rotation2D(angle=pa1-pa2)(*wcsref.wcs.cd)
        extin.hdr['CD1_1'] = cd[0][0]
        extin.hdr['CD1_2'] = cd[0][1]
        extin.hdr['CD2_1'] = cd[1][0]
        extin.hdr['CD2_2'] = cd[1][1]
    return adinput
Code example #41
def get_center(infile):
    f = pyfits.open(infile)
    if f[0].data is None:
        hdu = f[1]
    else:
        hdu = f[0]

    wcs = WCS(hdu.header)
    cx, cy = hdu.header["NAXIS1"] *.5, hdu.header["NAXIS2"] *.5
    radec_ = wcs.all_pix2world([(cx, cy)], 0)

    ra, dec = radec_[0]
    return ra, dec
Code example #42
File: boxbar_utils.py  Project: deprecated/orion-west
def update_box_file(box_file, bar2knot_map):
    """Add the knot coordinate ID into all the boxes"""
    # Each box_file has the boxes for one slit
    slit_boxes = pyregion.open(box_file)
    # Also open the fits file associated with this slit
    slit_name = box_file.replace(
        os.path.join(REGION_DIR, 'pvboxes-'), '').replace('.reg', '')
    fits_name = os.path.join(FITS_DIR, slit_name) + '-ha-vhel.fits'
    hdu, = fits.open(fits_name)
    # Get the normal WCS together with the 'V' alternative WCS
    w = WCS(hdu)
    ww = WCS(hdu, key='V')
    newboxes = []
    for b in slit_boxes:
        # Check that it really is a box and that coordinates are in
        # the correct format
        if b.name == 'box' and b.coord_format == 'image':
            # Extract slit pixel coordinates
            # ii is along velocity axis
            # jj is along slit length
            ii, jj, dii, djj, angle = b.coord_list
            # Find the start/end coordinate along the slit
            jj1, jj2 = jj - 0.5*djj, jj + 0.5*djj
            # Then use alt WCS to find velocity plus both x and y
            [v, _], [x1, x2], [y1, y2] = ww.all_pix2world(
                [ii, ii], [jj1, jj2], [0, 0], 0)
            # Convert velocity from m/s -> km/s
            v /= 1000.0
            # Use tuple of rounded coordinates as the key
            key = tuple(['{:.1f}'.format(_) for _ in [x1, y1, x2, y2]])
            try: 
                coord_id = bar2knot_map[key]
                bars_remaining.remove(key)
            except KeyError:
                print('  '*2, 'Failed to match key', key)
                print('  '*3, ii, jj, dii, djj)
                if v > 0.0:
                    coord_id = 'RED KNOT ({:+.0f})'.format(5.0*round(v/5))
                else:
                    coord_id = 'LOST KNOT ({:+.0f})'.format(5.0*round(v/5))
                print('  '*3, coord_id)

            newbox = BOX_FMT.format(ii, jj, dii, djj, angle, coord_id)
            newboxes.append(newbox)


    newbox_file = box_file.replace('pvboxes', 'pvboxes-knots')
    with open(newbox_file, 'w') as f:
        f.write(BOX_HEADER)
        f.write('\n'.join(newboxes))
    return None
Code example #43
File: boxbar_utils.py  Project: deprecated/orion-west
def find_knot_coord_ids(knots):
    """Find coordinate ID for each knot"""
    coord_ids = {}
    imhdu = fits.open('new-slits-ha-allvels.fits')['scaled']
    imwcs = WCS(imhdu.header)
    for knot_id, knot_data in knots.items():
        x = [0.5*(x1 + x2) for x1, _, x2, _ in knot_data['coords']]
        y = [0.5*(y1 + y2) for _, y1, _, y2 in knot_data['coords']]
        weights = knot_data['width']
        x0 = np.average(x, weights=weights)
        y0 = np.average(y, weights=weights)
        [ra], [dec] = imwcs.all_pix2world([x0], [y0], 0)
        coord_ids[knot_id] = radec2ow(ra, dec)
        v0 = np.average(knot_data['vel'], weights=weights)
        coord_ids[knot_id] += ' ({})'.format(int(round(v0/5.0)*5.0))
    return coord_ids
Code example #44
    def test_world(self, file, view):
        p = path(file)
        d = fits.getdata(p)
        wcs = WCS(p)
        c = SpectralCube(d, wcs)

        shp = d.shape
        inds = np.indices(d.shape)
        pix = np.column_stack([i.ravel() for i in inds[::-1]])
        world = wcs.all_pix2world(pix, 0).T

        world = [w.reshape(shp) for w in world]
        world = [w[view] * u.Unit(wcs.wcs.cunit[i])
                 for i, w in enumerate(world)][::-1]

        w2 = c.world[view]
        for result, expected in zip(w2, world):
            assert_allclose(result, expected)
Code example #45
File: c2h_figures.py  Project: fjdu/rac-2d
def load_fits_image(fname):
    hdulist = fits.open(fname)
    hdu_using = hdulist[0]
    header = hdu_using.header

    w = WCS(fname)
    ra_s, dec_s, _, _ = w.all_pix2world(np.arange(header['naxis1']),
                                        np.arange(header['naxis2']),
                                        0, 0, 0)
    d = hdu_using.data.squeeze()

    try:
        bmaj, bmin, bpa = header['bmaj'], header['bmin'], header['bpa']
    except KeyError:
        bmaj, bmin, bpa = None, None, None

    hdulist.close()
    return ra_s, dec_s, d, bmaj, bmin, bpa
Code example #46
File: HealpyFunc.py  Project: tdelubac/healpy-libs
def MakeMapWCS(fitsfile,output=None,nside=256,nest=True,norm=True,masked=True):
    #
    # Save map as healpy format from WCS fits input
    #
    from astropy.io import fits as pf 
    from astropy.wcs import WCS
    import numpy as np
    import healpy as hp
    
    hdulist = pf.open(fitsfile)
    Cat = hdulist[0].data
    w = WCS(fitsfile)
    hdulist.close()

    pixarea = hp.nside2pixarea(nside,degrees=True)
    print('nside = ', nside, ' --> Pixel area (deg2) = ', pixarea)

    x = np.arange(len(Cat[0]))
    y = np.zeros(len(Cat[0]))

    for i in np.arange(len(Cat)):
        if (i==0):
            continue
        x = np.concatenate((x,np.arange(len(Cat[0]))))
        y = np.concatenate((y,np.ones(len(Cat[0]))*i))
        print(len(x), len(y))
    radec = w.all_pix2world(x,y,1)
    
    tiles  = hp.ang2pix(nside,-radec[1]*np.pi/180.+np.pi/2.,radec[0]*np.pi/180.,nest)
    npix = hp.nside2npix(nside)
    n_hit_selec = np.zeros(npix)
    val = np.zeros(npix)
    for ix,iy,itile in zip(x,y,tiles):
        n_hit_selec[itile]+=1
        val[itile]+=Cat[int(iy)][int(ix)]  # cast: iy, ix are floats
    
    val[n_hit_selec>0]/=n_hit_selec[n_hit_selec>0]
    if masked:
        val = MaskBorders(val)
    if output is None:
        return val
        return val
    else:
        hp.write_map(output,val,nest)   
        return
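The `ang2pix` call above maps (ra, dec) in degrees to HEALPix spherical coordinates: theta is the colatitude pi/2 - dec and phi is ra, both in radians. A quick standalone check of that convention with invented values:

import numpy as np
import healpy as hp

nside = 256
ra, dec = 150.0, 2.0                   # degrees (invented)
theta = np.pi/2. - np.radians(dec)     # colatitude in radians
phi = np.radians(ra)
print(hp.ang2pix(nside, theta, phi, nest=True))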
Code example #47
File: sdss.py  Project: astropy/specutils
def spSpec_loader(file_name, **kwargs):
    """
    Loader for SDSS-I/II spSpec files.

    Parameters
    ----------
    file_name: str
        The path to the FITS file

    Returns
    -------
    data: Spectrum1D
        The spectrum that is represented by the data in this table.
    """
    name = os.path.basename(file_name.rstrip(os.sep)).rsplit('.', 1)[0]
    hdulist = fits.open(file_name, **kwargs)

    header = hdulist[0].header
    meta = {'header': header}
    wcs = WCS(hdulist[0].header)

    data = hdulist[0].data[0, :]
    unit = Unit('1e-17 erg / (Angstrom cm2 s)')

    uncertainty = StdDevUncertainty(hdulist[0].data[2, :] * unit)

    # dispersion from the WCS but convert out of logspace
    # dispersion = 10**wcs.all_pix2world(np.arange(data.shape[0]), 0)[0]
    dispersion = 10**wcs.all_pix2world(np.vstack((np.arange(data.shape[0]),
                                                  np.zeros((data.shape[0],)))).T,
                                       0)[:, 0]
    # dispersion = 10**hdulist[1].data['loglam']
    dispersion_unit = Unit('Angstrom')

    mask = hdulist[0].data[3, :] != 0
    hdulist.close()

    return Spectrum1D(flux=data * unit,
                      spectral_axis=dispersion * dispersion_unit,
                      uncertainty=uncertainty,
                      meta=meta,
                      mask=mask)
Code example #48
def grab_connected_postage_stamps(sci_frame, ref_frame, yc, xc, Yrange=50, Xrange=50):
    """Cut matching postage stamps around (xc, yc) from a science frame and
    from the corresponding region of a reference frame, using each frame's
    WCS to connect the two pixel grids.

    Args:
        sci_frame: science-image HDUList.
        ref_frame: reference-image HDUList.
        yc, xc (float): stamp centre in science-frame pixels.
        Yrange, Xrange (int): stamp half-sizes in pixels.

    Returns:
        (sciSubData, refSubData): the science stamp and the 180-degree-rotated
        reference stamp.
    """
    from numpy import array, isnan, median, rot90, where  # bare NumPy names used below
    sciWCS      = WCS(sci_frame[0].header)
    refWCS      = WCS(ref_frame[0].header)
    
    refdata     = ref_frame[0].data.copy()
    
    refdata[where(isnan(refdata))]  = median(refdata[where(~isnan(refdata))])
    
    scidata     = sci_frame[0].data.copy()
    scidata[where(isnan(scidata))]  = median(scidata[where(~isnan(scidata))])
    sciSubframe    = [  [int(round(yc-Yrange)), int(round(yc+Yrange))] ,
                        [int(round(xc-Xrange)), int(round(xc+Xrange))]]
    
    cometRA, cometDEC            = array(sciWCS.all_pix2world(xc, yc, 0))  # origin=0; 'zc' was undefined
    refPixCometX, refPixCometY   = array(refWCS.all_world2pix(cometRA, cometDEC, 0.0))

    refSubframe    = [  [int(round(refPixCometY - Yrange))+1,int(round(refPixCometY + Yrange))+1] ,
                        [int(round(refPixCometX - Xrange))+1,int(round(refPixCometX + Xrange))+1]]

    y, x = 0, 1  # row/column indices into the subframe lists (were undefined)
    sciSubData  = scidata[sciSubframe[y][0]:sciSubframe[y][1],sciSubframe[x][0]:sciSubframe[x][1]]
    refSubData  = rot90(refdata[refSubframe[y][0]:refSubframe[y][1],refSubframe[x][0]:refSubframe[x][1]],2)
    
    return sciSubData, refSubData
Code example #49
File: apogee.py  Project: astropy/specutils
def apStar_loader(file_name, **kwargs):
    """
    Loader for APOGEE apStar files.

    Parameters
    ----------
    file_name: str
        The path to the FITS file

    Returns
    -------
    data: Spectrum1D
        The spectrum that is represented by the data in this table.
    """
    name = os.path.basename(file_name.rstrip(os.sep)).rsplit('.', 1)[0]
    hdulist = fits.open(file_name, **kwargs)

    header = hdulist[0].header
    meta = {'header': header}
    wcs = WCS(hdulist[1].header)

    data = hdulist[1].data[0, :]  # spectrum in the first row of the first extension
    unit = Unit('1e-17 erg / (Angstrom cm2 s)')

    uncertainty = StdDevUncertainty(hdulist[2].data[0, :])

    # dispersion from the WCS but convert out of logspace
    # dispersion = 10**wcs.all_pix2world(np.arange(data.shape[0]), 0)[0]
    dispersion = 10**wcs.all_pix2world(np.vstack((np.arange(data.shape[0]),
                                                  np.zeros((data.shape[0],)))).T,
                                       0)[:, 0]
    dispersion_unit = Unit('Angstrom')
    hdulist.close()

    return Spectrum1D(data=data * unit,
                      uncertainty=uncertainty,
                      dispersion=dispersion * dispersion_unit,
                      meta=meta,
                      wcs=wcs)
Code example #50
File: select_stars.py  Project: bjanesh/uchvc-tools
output_cmd_pdf = 'testcmd.pdf'

width = 0.2 # in mags
# dm = 21.69

###################
g_m_iso, i_m_iso = np.loadtxt(iso_filename, usecols=(0,1), unpack=True)
g_ierr, ix, iy, i_ierr, g_mag,i_mag,gmi = np.loadtxt(photcalib_filename, usecols=(5,6,7,10,11,12,13), unpack=True)
gmi_err = np.sqrt(g_ierr**2 + i_ierr**2)

# if you get an error about loading the WCS, uncomment the following lines to
# delete the pipeline WCS keywords from the header
# from pyraf import iraf
# iraf.imutil.hedit(images=wcs_source_image_filename, fields='PV*', delete='yes', verify='no')
w = WCS(wcs_source_image_filename)
ra, dec = w.all_pix2world(ix, iy, 1)

# i_m_iso = i_iso + dm
gi_iso = g_m_iso - i_m_iso

colors_left = gi_iso - width/2.0
colors_right = gi_iso + width/2.0

colors = np.concatenate((colors_left, np.flipud(colors_right)))
mags = np.concatenate((i_m_iso, np.flipud(i_m_iso)))

verts = list(zip(colors, mags))		# set up the Path necessary for testing membership (list() needed in py3)
cm_filter = Path(verts)

stars_f = np.empty_like(gmi, dtype=bool)
for i in range(len(gmi)) : 
Code example #51
def fit_knot_unified(hdu, j1, j2, u0, lineid='nii'):

    NS, NV = hdu.data.shape
    w = WCS(hdu.header)
    vels, _ = w.all_pix2world(np.arange(NV), [0]*NV, 0)
    vels /= 1000.0

    # Ensure we don't go out of bounds
    j1 = max(j1, 0)
    j2 = min(j2, NS)
    print('Slit pixels {}:{} out of {}'.format(j1, j2, NS))

    knotspec = hdu.data[j1:j2, :].sum(axis=0)
    # make sure all pixels are positive, since that helps the fitting/plotting
    knotspec -= knotspec.min()

    # Levenberg-Marquardt for easy jobs
    lmfitter = SherpaFitter(statistic='chi2',
                            optimizer='levmar',
                            estmethod='confidence')

    # Simulated annealing for trickier jobs
    safitter = SherpaFitter(statistic='chi2',
                            optimizer='neldermead',
                            estmethod='covariance')

    # The idea is that this strategy should work for all knots

    # Estimate error from the BG: < -120 or > +100
    bgmask = np.abs(vels + 10.0) >= 110.0
    bgerr = np.std(knotspec[bgmask]) * np.ones_like(vels)

    # Define core as [-10, 50], or 20 +/- 30
    coremask = np.abs(vels - 20.0) < 30.0

    # Fit to the BG with constant plus Lorentz
    try: 
        vmean = np.average(vels[coremask], weights=knotspec[coremask])
    except ZeroDivisionError:
        vmean = 15.0

    bgmodel = lmfitter(_init_bgmodel(vmean),
                       vels[bgmask], knotspec[bgmask],
                       err=bgerr[bgmask])
    # Now freeze the BG model and add it to the initial core model
    #bgmodel['Lorentz'].fixed['amplitude'] = True
    #bgmodel['Constant'].fixed['amplitude'] = True

    # Increase the data err in the bright part of the line to mimic Poisson noise
    # Even though we don't know what the normalization is really, we will guess ...
    spec_err = bgerr + POISSON_SCALE*np.sqrt(knotspec)


    ## Now for the exciting bit, fit everything at once
    ##
    knotmask = np.abs(vels - u0) <= KNOT_WIDTH
    # For low-velocity knots, we need to exclude positive velocities
    # from the mask, since they will have large residual errors from
    # the core subtraction
    knotmask = knotmask & (vels < 0.0)

    # Start off with the frozen BG model
    fullmodel = bgmodel.copy()
    core_components = list(fullmodel.submodel_names)

    # Add in a model for the core
    DV_INIT = [-15.0, -5.0, 5.0, 10.0, 30.0]
    NCORE = len(DV_INIT)
    BASE_WIDTH = 10.0 if lineid == 'ha' else 5.0
    W_INIT = [BASE_WIDTH]*4 + [1.5*BASE_WIDTH]
    for i in range(NCORE):
        v0 = vmean + DV_INIT[i]
        w0 = W_INIT[i]
        component = 'G{}'.format(i)
        fullmodel += Gaussian1D(
            3.0, v0, w0,
            bounds={'amplitude': [0, None],
                    'mean': [v0 - 10, v0 + 10],
                    'stddev': [w0, 1.5*w0]},
            name=component)
        core_components.append(component)

    # Now, add in components for the knot to extract
    knotmodel_init = Gaussian1D(
        0.01, u0, BASE_WIDTH,
        # Allow +/- 10 km/s leeway around nominal knot velocity
        bounds={'amplitude': [0, None],
                'mean': [u0 - 10, u0 + 10],
                'stddev': [BASE_WIDTH, 25.0]},
        name='Knot')
    fullmodel += knotmodel_init
    knot_components = ['Knot']
    other_components = []

    # Depending on the knot velocity, we may need other components to
    # take up the slack too
    if u0 <= -75.0 or u0 >= -50.0:
        # Add in a generic fast knot
        fullmodel += Gaussian1D(
            0.01, -60.0, BASE_WIDTH,
            bounds={'amplitude': [0, None],
                    'mean': [-70.0, -50.0],
                    'stddev': [BASE_WIDTH, 25.0]},
            name='Fast other')
        other_components.append('Fast other')

    if u0 <= -50.0:
        # Add in a generic slow knot
        fullmodel += Gaussian1D(
            0.01, -30.0, BASE_WIDTH,
            bounds={'amplitude': [0, None],
                    'mean': [-40.0, -10.0],
                    'stddev': [BASE_WIDTH, 25.0]},
            name='Slow other')
        other_components.append('Slow other')

    if u0 >= -75.0:
        # Add in a very fast component
        fullmodel += Gaussian1D(
            0.001, -90.0, BASE_WIDTH,
            bounds={'amplitude': [0, None],
                    'mean': [-110.0, -75.0],
                    'stddev': [BASE_WIDTH, 25.0]},
            name='Ultra-fast other')
        other_components.append('Ultra-fast other')

    if u0 <= 30.0:
        # Add in a red-shifted component just in case
        fullmodel += Gaussian1D(
            0.01, 40.0, BASE_WIDTH,
            bounds={'amplitude': [0, None],
                    'mean': [30.0, 200.0],
                    'stddev': [BASE_WIDTH, 25.0]},
            name='Red other')
        other_components.append('Red other')

    # Moment of truth: fit models to data
    fullmodel = safitter(fullmodel, vels, knotspec, err=spec_err)
    full_fit_info = safitter.fit_info

    # Isolate the core+other model components 
    coremodel = fullmodel[core_components[0]]
    for component in core_components[1:] + other_components:
        coremodel += fullmodel[component]

    # Subtract the core model from the data
    residspec = knotspec - coremodel(vels)

    # Now re-fit the knot model to the residual

    # Calculate running std of residual spectrum
    NWIN = 11
    running_mean = generic_filter(residspec, np.mean, size=(NWIN,))
    running_std = generic_filter(residspec, np.std, size=(NWIN,))

    # Increase error estimate for data points where this is larger
    # than spec_err, but only for velocities that are not in knotmask
    residerr = bgerr.copy()  # copy so the in-place update below leaves bgerr intact
    # residerr = spec_err
    mask = (~knotmask) & (running_std > bgerr)
    residerr[mask] = running_std[mask]
    # The reason for this is so that poor modelling of the core is
    # accounted for in the errors.  Otherwise the reduced chi2 of the
    # knot model will be too high

    # Make an extended mask for fitting the knot, omitting the
    # redshifted half of the spectrum since it is irrelevant and we
    # don't want it to affect the chi2 or the confidence intervals
    bmask = vels < 50.0

    knotmodel = lmfitter(knotmodel_init,
                         vels[bmask], residspec[bmask],
                         err=residerr[bmask])

    # Calculate the final residuals, which should be flat
    final_residual = residspec - knotmodel(vels)

    # Look at stddev of the final residuals and use them to rescale
    # the residual errors.  Then re-fit the knot with this better
    # estimate of the errors.  But only if rescaling would reduce the
    # data error estimate.
    residerr_rescale = final_residual[bmask].std() / residerr[bmask].mean()
    if residerr_rescale < 1.0:
        print('Rescaling data errors by', residerr_rescale)
        residerr *= residerr_rescale
        knotmodel = lmfitter(knotmodel,
                             vels[bmask], residspec[bmask],
                             err=residerr[bmask])
    else:
        residerr_rescale = 1.0

    knot_fit_info = lmfitter.fit_info
    lmfitter._fitter.estmethod.config['max_rstat'] = MAX_RSTAT
    if knot_fit_info.rstat < MAX_RSTAT:
        knot_fit_errors = lmfitter.est_errors(sigma=3)
    else:
        knot_fit_errors = None

    return {
        'nominal knot velocity': u0,
        'velocities': vels,
        'full profile': knotspec,
        'error profile': residerr,
        'core fit model': coremodel,
        'core fit profile': coremodel(vels),
        'core fit components': {k: coremodel[k](vels) for k in coremodel.submodel_names},
        'core fit info': full_fit_info,
        'core-subtracted profile': residspec,
        'knot fit model': knotmodel,
        'knot fit profile': knotmodel(vels),
        'knot fit info': knot_fit_info,
        'knot fit errors': knot_fit_errors,
        'error rescale factor': residerr_rescale,
        'knot j range': (j1, j2),
    }
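A minimal calling sketch for fit_knot_unified; the file name, slit rows, and knot velocity below are illustrative assumptions, not values from the source:

hdu = fits.open('slit_spectrum.fits')[0]  # hypothetical input file
result = fit_knot_unified(hdu, j1=120, j2=140, u0=-65.0, lineid='nii')
knotmodel = result['knot fit model']
print(knotmodel.amplitude.value, knotmodel.mean.value, knotmodel.stddev.value)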
コード例 #52
0
def fit_knot(hdu, j1, j2, u0):

    NS, NV = hdu.data.shape
    w = WCS(hdu.header)
    vels, _ = w.all_pix2world(np.arange(NV), [0]*NV, 0)
    vels /= 1000.0

    # Ensure we don't go out of bounds
    j1 = max(j1, 0)
    j2 = min(j2, NS)
    print('Slit pixels {}:{} out of {}'.format(j1, j2, NS))

    knotspec = hdu.data[j1:j2, :].sum(axis=0)
    # make sure all pixels are positive, since that helps the fitting/plotting
    knotspec -= knotspec.min()

    # Levenberg-Marquardt for easy jobs
    lmfitter = SherpaFitter(statistic='chi2',
                            optimizer='levmar',
                            estmethod='confidence')
    # Simulated annealing for trickier jobs
    safitter = SherpaFitter(statistic='chi2',
                            optimizer='neldermead',
                            estmethod='covariance')

    # First do the strategy for typical knots (u0 = [-30, -80])

    # Estimate error from the BG: < -120 or > +100
    bgmask = np.abs(vels + 10.0) >= 110.0
    bgerr = np.std(knotspec[bgmask]) * np.ones_like(vels)

    # Fit to the BG with constant plus Lorentz
    try: 
        vmean = np.average(vels, weights=knotspec)
    except ZeroDivisionError:
        vmean = 15.0

    bgmodel = lmfitter(_init_bgmodel(vmean),
                       vels[bgmask], knotspec[bgmask],
                       err=bgerr[bgmask])
    # Now freeze the BG model and add it to the initial core model
    bgmodel['Lorentz'].fixed['amplitude'] = True
    bgmodel['Constant'].fixed['amplitude'] = True

    # Increase the data err in the bright part of the line to mimic Poisson noise
    # Even though we don't know the true normalization, we make a guess ...
    spec_err = bgerr + POISSON_SCALE*np.sqrt(knotspec)

    # Fit to the line core
    knotmask = np.abs(vels - u0) <= KNOT_WIDTH
    coremodel = safitter(_init_coremodel() + bgmodel,
                         vels[~knotmask], knotspec[~knotmask],
                         err=spec_err[~knotmask])
    core_fit_info = safitter.fit_info

    # Residual should contain just knot
    residspec = knotspec - coremodel(vels)

    # Calculate running std of residual spectrum
    NWIN = 11
    running_mean = generic_filter(residspec, np.mean, size=(NWIN,))
    running_std = generic_filter(residspec, np.std, size=(NWIN,))

    # Increase error estimate for data points where this is larger
    # than spec_err, but only for velocities that are not in knotmask
    residerr = bgerr.copy()  # copy so the in-place update below leaves bgerr intact
    # residerr = spec_err
    mask = (~knotmask) & (running_std > bgerr)
    residerr[mask] = running_std[mask]
    # The reason for this is so that poor modelling of the core is
    # accounted for in the errors.  Otherwise the reduced chi2 of the
    # knot model will be too high

    # Make an extended mask for fitting the knot, omitting the
    # redshifted half of the spectrum since it is irrelevant and we
    # don't want it to affect the chi2 or the confidence intervals
    bmask = vels < 50.0

    # Fit single Gaussian to knot 
    amplitude_init = residspec[knotmask].max()
    if amplitude_init < 0.0:
        # ... pure desperation here
        amplitude_init = residspec[bmask].max()
    knotmodel = lmfitter(_init_knotmodel(amplitude_init, u0),
                         vels[bmask], residspec[bmask],
                         err=residerr[bmask])

    # Calculate the final residuals, which should be flat
    final_residual = residspec - knotmodel(vels)

    # Look at stddev of the final residuals and use them to rescale
    # the residual errors.  Then re-fit the knot with this better
    # estimate of the errors.  But only if rescaling would reduce the
    # data error estimate.
    residerr_rescale = final_residual[bmask].std() / residerr[bmask].mean()
    if residerr_rescale < 1.0:
        print('Rescaling data errors by', residerr_rescale)
        residerr *= residerr_rescale
        knotmodel = lmfitter(knotmodel,
                             vels[bmask], residspec[bmask],
                             err=residerr[bmask])
    else:
        residerr_rescale = 1.0

    knot_fit_info = lmfitter.fit_info
    lmfitter._fitter.estmethod.config['max_rstat'] = MAX_RSTAT
    if knot_fit_info.rstat < MAX_RSTAT:
        knot_fit_errors = lmfitter.est_errors(sigma=3)
    else:
        knot_fit_errors = None

    return {
        'nominal knot velocity': u0,
        'velocities': vels,
        'full profile': knotspec,
        'error profile': residerr,
        'core fit model': coremodel,
        'core fit profile': coremodel(vels),
        'core fit components': {k: coremodel[k](vels) for k in coremodel.submodel_names},
        'core fit info': core_fit_info,
        'core-subtracted profile': residspec,
        'knot fit model': knotmodel,
        'knot fit profile': knotmodel(vels),
        'knot fit info': knot_fit_info,
        'knot fit errors': knot_fit_errors,
        'error rescale factor': residerr_rescale,
    }
コード例 #53
0
ファイル: avg_col.py プロジェクト: a-cohn/bones-research
        #measuring the size of the file
        nrows_im=len(imdata[:,0]) #number of rows of fits file
        ncols_im=len(imdata[0]) #number of columns of fits file

        nrows_dat=len(data[:,0])
        ncols_dat=len(data[0])

        w = WCS('/Users/Cohn/Desktop/summer15research/'+col_name_array[n-1])
        wim = WCS('/Users/Cohn/Desktop/summer15research/masses with corrected foreground intensity ratios/filament_'+str(n)+'_mass_above_3_background_std_above_mean_mask.fits')
        
        lon_array=np.zeros((nrows_im,ncols_im))
        lat_array=np.zeros((nrows_im,ncols_im))
        for row in range(nrows_im):
            for col in range(ncols_im):
                lon, lat = wim.all_pix2world(col,row,0)
                lon_array[row,col]=lon
                lat_array[row,col]=lat

        x_array=np.zeros((nrows_im,ncols_im))
        y_array=np.zeros((nrows_im,ncols_im))
        for row in range(nrows_im):
            for col in range(ncols_im):
                if imdata[row,col]==1:
                    x, y = w.all_world2pix(lon_array[row,col],lat_array[row,col],0)
                    x_array[row,col] = x
                    y_array[row,col] = y

        data=lbdata*data
        new_map=np.zeros((nrows_dat,ncols_dat))
        total_col=0
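The per-pixel double loops above work, but both all_pix2world and all_world2pix accept whole arrays, so the same coordinate maps can be built in a few vectorized calls. A minimal sketch, assuming the same w, wim, imdata, and shape variables as above:

        cols, rows = np.meshgrid(np.arange(ncols_im), np.arange(nrows_im))
        lon_array, lat_array = wim.all_pix2world(cols, rows, 0)

        x_array = np.zeros((nrows_im, ncols_im))
        y_array = np.zeros((nrows_im, ncols_im))
        sel = imdata == 1
        x_array[sel], y_array[sel] = w.all_world2pix(lon_array[sel],
                                                     lat_array[sel], 0)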
コード例 #54
0
ファイル: zap.py プロジェクト: charlottenosam/zap
class zclass(object):

    """ Main class to run each of the steps of ZAP.

    Attributes
    ----------

    cleancube : numpy.ndarray
        The final datacube after removing all of the residual features.
    contarray : numpy.ndarray
        A 2D array containing the subtracted continuum per spaxel.
    cube : numpy.ndarray
        The original cube with the zlevel subtraction performed per spaxel.
    especeval : list of (eigenspectra, eval)
        A list containing the full set of eigenspectra and eigenvalues
        generated by the SVD calculation that is used to reconstruct the
        entire datacube.
    laxis : numpy.ndarray
        A 1d array containing the wavelength solution generated from the header
        parameters.
    wcs : astropy.wcs.WCS
        WCS object with the wavelength solution.
    lranges : list
        A list of the wavelength bin limits used in segmenting the spectrum
        for SVD.
    nancube : numpy.ndarray
        A 3d boolean datacube containing True in voxels where a NaN value was
        replaced with an interpolation.
    nevals : numpy.ndarray
        A 1d array containing the number of eigenvalues used per segment to
        reconstruct the residuals.
    normstack : numpy.ndarray
        A normalized version of the datacube deconstructed into a 2d array.
    varlist : numpy.ndarray
        An array for each segment with the variance curve, calculated for the
        optimize method.
    pranges : numpy.ndarray
        The pixel indices of the bounding regions for each spectral segment.
    recon : numpy.ndarray
        A 2d array containing the reconstructed emission line residuals.
    run_clean : bool
        Boolean that indicates that the NaN cleaning method was used.
    run_zlevel : bool
        Boolean indicating that the zero level correction was used.
    stack : numpy.ndarray
        The datacube deconstructed into a 2d array for use in the SVD.
    subespeceval : list of (eigenspectra, eval)
        The subset of eigenvalues and eigenspectra used to reconstruct the sky
        residuals.
    variancearray : numpy.ndarray
        A list of length nsegments containing variances calculated per spaxel
        used for normalization.
    y, x : numpy.ndarray
        The positions in the cube of the spaxels that are in the 2d
        deconstructed stack.
    zlsky : numpy.ndarray
        A 1d array containing the result of the zero level subtraction.

    """

    def __init__(self, cubefits=None, cube=None, hdr=None, instrument='MUSE'):
        """ Initialization of the zclass.

        Pulls the datacube into the class and trims it based on the known
        optimal spectral range of MUSE.

        """
        # If hdu cube + header is supplied
        if cube is not None:
            self.cube     = cube
            self.header   = hdr
            self.cubefits = cubefits
        
        # If there is only a cubefits file
        else:
            hdu = fits.open(cubefits)
            self.cube     = hdu[1].data
            self.header   = hdu[1].header
            self.cubefits = cubefits
            hdu.close()

        # Workaround for floating-point errors in the WCS computation: if CUNIT3
        # is specified, wcslib converts to meters instead of angstroms, so we
        # remove CUNIT3 before creating the WCS object
        header = self.header.copy()
        unit = u.Unit(header.pop('CUNIT3'))
        self.wcs = WCS(header).sub([3])

        # Create Lambda axis
        wlaxis = np.arange(self.cube.shape[0])
        self.laxis = self.wcs.all_pix2world(wlaxis, 0)[0]
        if unit != u.angstrom:
            # Make sure lambda is in angstroms
            self.laxis = (self.laxis * unit).to(u.angstrom).value

        # NaN Cleaning
        self.run_clean = False
        self.nancube = None
        self._boxsz = 1
        self._rejectratio = 0.25

        # Mask file
        self.maskfile = None

        # zlevel parameters
        self.run_zlevel = False
        self.zlsky = np.zeros_like(self.laxis)

        # Extraction results
        self.stack = None
        self.y = None
        self.x = None

        # Normalization Maps
        self.contarray = None
        self.variancearray = None
        self.normstack = None

        # identify the spectral range of the dataset
        laxmin = min(self.laxis)
        laxmax = max(self.laxis)

        logger.debug('Minimum wavelength=%d, maximum wavelength=%d', laxmin, laxmax)

        # List of segmentation limits in the optical
        if instrument == 'MUSE':
            logger.info('Setup for MUSE')
            skyseg = np.array(SKYSEG_MUSE)
        elif instrument == 'KMOS':
            logger.info('Setup for KMOS YJ')
            skyseg = np.array(SKYSEG_KMOS_YJ)
        else:
            raise ValueError('None or invalid instrument name given')

        skyseg = skyseg[(skyseg > laxmin) & (skyseg < laxmax)]

        # segment limit in angstroms
        self.lranges = (np.vstack([np.append(laxmin - 10, skyseg),
                                   np.append(skyseg, laxmax + 10)])).T

        # segment limit in pixels
        laxis = self.laxis
        lranges = self.lranges

        pranges = []
        for i in range(len(lranges)):
            logger.debug('Lranges: %.0f, %.0f',lranges[i, 0],lranges[i, 1])
            paxis = wlaxis[(laxis > lranges[i, 0]) & (laxis <= lranges[i, 1])]
            pranges.append((np.min(paxis), np.max(paxis) + 1))

        self.pranges = np.array(pranges)

        # eigenspace Subset
        self.especeval = []
        self.subespeceval = []

        # Reconstruction of sky features
        self.recon = None
        self.cleancube = None
        self.varlist = None  # container for variance curves

    @timeit
    def _run(self, clean=True, zlevel='median', cftype='weight',
             cfwidth=100, pevals=[], nevals=[], optimizeType='normal',
             extSVD=None):
        """ Perform all zclass to ZAP a datacube:

        - NaN re/masking,
        - deconstruction into "stacks",
        - zero-level subtraction,
        - continuum removal,
        - normalization,
        - singular value decomposition,
        - eigenvector selection,
        - residual reconstruction and subtraction,
        - data cube reconstruction.

        """
        logger.info('Running ZAP %s !', __version__)

        self.optimizeType = optimizeType

        # clean up the nan values
        if clean:
            self._nanclean()

        # Extract the spectra that we will be working with
        self._extract()

        # remove the median along the spectral axis
        if extSVD is None:
            if zlevel.lower() != 'none':
                self._zlevel(calctype=zlevel)
        else:
            self._externalzlevel(extSVD)

        # remove the continuum level - this is multiprocessed to speed it up
        self._continuumfilter(cfwidth=cfwidth, cftype=cftype)

        # do the multiprocessed SVD calculation
        if extSVD is None:
            self._msvd()
        else:
            self._externalSVD(extSVD)

        # choose some fraction of eigenspectra or some finite number of
        # eigenspectra
        if optimizeType != 'none' or (nevals == [] and pevals == []):
            self.optimize()
            self.chooseevals(nevals=self.nevals)
        else:
            self.chooseevals(pevals=pevals, nevals=nevals)

        # reconstruct the sky residuals using the subset of eigenspace
        self.reconstruct()

        # stuff the new spectra back into the cube
        self.remold()

    # Clean up the nan value spaxels
    def _nanclean(self):
        """
        Detects NaN values in cube and removes them by replacing them with an
        interpolation of the nearest neighbors in the data cube. The positions
        in the cube are retained in nancube for later remasking.
        """
        self.cube, self.nancube = _nanclean(
            self.cube, rejectratio=self._rejectratio, boxsz=self._boxsz)
        self.run_clean = True

    @timeit
    def _extract(self):
        """
        Deconstruct the datacube into a 2d array, since spatial information is
        not required, and the linear algebra routines require 2d arrays.

        The operation rejects any spaxel with even a single NaN value, since
        this would cause the linear algebra routines to crash.

        Adds the x and y data of these positions into the zclass

        """
        logger.debug('Extracting to 2D')
        # make a map of spaxels with NaNs
        badmap = (np.logical_not(np.isfinite(self.cube))).sum(axis=0)
        # get positions of those with no NaNs
        self.y, self.x = np.where(badmap == 0)
        # extract those positions into a 2d array
        self.stack = self.cube[:, self.y, self.x]
        logger.debug('%d valid spaxels', len(self.x))

    def _externalzlevel(self, extSVD):
        """Remove the zero level from the extSVD file."""
        logger.info('Using external zlevel')
        self.zlsky = fits.getdata(extSVD, 0)
        self.stack -= self.zlsky[:, np.newaxis]
        self.run_zlevel = 'extSVD'

    @timeit
    def _zlevel(self, calctype='median'):
        """
        Removes a 'zero' level from each spectral plane. Spatial information is
        not required, so it operates on the extracted stack.

        Operates on stack, leaving it with this level removed and adds the data
        'zlsky' to the class. zlsky is a spectrum of the zero levels.

        This zero level is currently calculated with a median.

        Experimental operations -

        - exclude top quartile
        - run in an iterative sigma clipped mode

        """

        self.run_zlevel = calctype

        if calctype != 'none':
            logger.info('Subtracting Zero Level')

            zlstack = self.stack

            if calctype == 'median':
                logger.info('Median zlevel calculation')
                func = _imedian
            elif calctype == 'sigclip':
                logger.info('Iterative Sigma Clipping zlevel calculation')
                func = _isigclip

            self.zlsky = np.hstack(parallel_map(func, zlstack, NCPU, axis=0))
            self.stack -= self.zlsky[:, np.newaxis]
        else:
            logger.info('Skipping zlevel subtraction')

    def _continuumfilter(self, cfwidth=100, cftype='weight'):
        """ A multiprocessed implementation of the continuum removal.

        This process distributes the data to many processes that then
        reassemble the data.  Uses two filters, a small scale (less than the
        line spread function) uniform filter, and a large scale median filter
        to capture the structure of a variety of continuum shapes.

        added to class
        contarray - the removed continuua
        normstack - "normalized" version of the stack with the continuua
            removed

        """
        logger.info('Applying Continuum Filter, cfwidth=%d', cfwidth)
        if cftype not in ('weight', 'median', 'none'):
            raise ValueError("cftype must be 'weight' or 'median', got {}"
                             .format(cftype))
        self._cftype = cftype
        self._cfwidth = cfwidth

        if cftype == 'median':
            weight = None
        elif cftype == 'weight':
            weight = np.abs(self.zlsky - (np.max(self.zlsky) + 1))

        # remove continuum features
        if cftype == 'none':
            self.contarray = np.zeros_like(self.stack)
            self.normstack = self.stack.copy()
        else:
            self.contarray = _continuumfilter(self.stack, cftype,
                                              weight=weight, cfwidth=cfwidth)
            self.normstack = self.stack - self.contarray

    @timeit
    def _msvd(self):
        """ Multiprocessed singular value decomposition.

        First the normstack is normalized per segment per spaxel by the
        variance.  Takes the normalized, spectral segments and distributes them
        to the individual svd methods.

        """
        logger.info('Calculating SVD')

        # normalize the variance in the segments
        nseg = len(self.pranges)
        self.variancearray = var = np.zeros((nseg, self.stack.shape[1]))

        for i in range(nseg):
            pmin, pmax = self.pranges[i]
            var[i, :] = np.var(self.normstack[pmin:pmax, :], axis=0)
            self.normstack[pmin:pmax, :] /= var[i, :]

        logger.debug('Beginning SVD on %d segments', nseg)
        indices = [x[0] for x in self.pranges[1:]]
        self.especeval = parallel_map(_isvd, self.normstack, indices, axis=0)

    def chooseevals(self, nevals=[], pevals=[]):
        """ Choose the number of eigenspectra/evals to use for reconstruction.

        User supplies the number of eigen spectra to be used (neval) or the
        percentage of the eigenspectra that were calculated (peval) from each
        spectral segment to be used.

        The user can either provide a single value to be used for all segments,
        or provide an array that defines neval or peval per segment.

        """
        nranges = len(self.especeval)
        nevals = np.atleast_1d(nevals)
        pevals = np.atleast_1d(pevals)
        nespec = np.array([self.especeval[i][0].shape[1]
                           for i in range(nranges)])

        # deal with no selection
        if len(nevals) == 0 and len(pevals) == 0:
            logger.info('Number of modes not selected')
            nevals = np.array([1])

        # deal with an input list
        if len(nevals) > 1:
            if len(nevals) != nranges:
                nevals = np.array([nevals[0]])
                logger.info('Chosen eigenspectra array does not correspond to '
                            'number of segments')
            else:
                logger.info('Choosing %s eigenspectra for segments', nevals)

        if len(pevals) > 1:
            if len(pevals) != nranges:
                pevals = np.array([pevals[0]])
                logger.info('Chosen eigenspectra array does not correspond to '
                            'number of segments')
            else:
                logger.info('Choosing %s%% of eigenspectra for segments',
                            pevals)
                nevals = (pevals * nespec / 100.).round().astype(int)

        # deal with single value entries
        if len(pevals) == 1:
            logger.info('Choosing %s%% of eigenspectra for all segments',
                        pevals)
            nevals = (pevals * nespec / 100.).round().astype(int)
        elif len(nevals) == 1:
            logger.info('Choosing %s eigenspectra for all segments', nevals)
            nevals = np.zeros(nranges, dtype=int) + nevals

        # take subset of the eigenspectra and put them in a list
        subespeceval = []
        for i in range(nranges):
            eigenspectra, evals = self.especeval[i]
            tevals = (evals[0:nevals[i], :]).copy()
            teigenspectra = (eigenspectra[:, 0:nevals[i]]).copy()
            subespeceval.append((teigenspectra, tevals))

        self.subespeceval = subespeceval
        self.nevals = nevals

    @timeit
    def reconstruct(self):
        """Reconstruct the residuals from a given set of eigenspectra and
        eigenvalues
        """

        logger.info('Reconstructing Sky Residuals')
        nseg = len(self.especeval)
        rec = [(eig[:, :, np.newaxis] * ev[np.newaxis, :, :]).sum(axis=1)
               for eig, ev in self.subespeceval]
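        # each term above is just the matrix product eig @ ev, written out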

        # rescale to correct variance
        for i in range(nseg):
            rec[i] *= self.variancearray[i, :]
        self.recon = np.concatenate(rec)

    # stuff the stack back into a cube
    def remold(self):
        """ Subtracts the reconstructed residuals and places the cleaned
        spectra into the duplicated datacube.
        """
        logger.info('Applying correction and reshaping data product')
        self.cleancube = self.cube.copy()
        self.cleancube[:, self.y, self.x] = self.stack - self.recon
        if self.run_clean:
            self.cleancube[self.nancube] = np.nan

    # redo the residual reconstruction with a different set of parameters
    def reprocess(self, pevals=[], nevals=[]):
        """
        A method that redoes the eigenvalue selection, reconstruction, and
        remolding of the data.
        """

        self.chooseevals(pevals=pevals, nevals=nevals)
        self.reconstruct()
        self.remold()

    @timeit
    def optimize(self):
        """ Function to optimize the number of components used to characterize
        the residuals.

        This function calculates the variance per segment with an increasing
        number of eigenspectra/eigenvalues. It then determines the point at
        which the second derivative of this variance curve reaches zero. When
        this occurs, the linear reduction in variance is attributable to the
        removal of astronomical features rather than emission line residuals.

        """
        logger.info('Optimizing')

        normstack = self.stack - self.contarray
        nseg = len(self.especeval)
        self.nevals = np.zeros(nseg, dtype=int)
        indices = [x[0] for x in self.pranges[1:]]
        self.varlist = parallel_map(_ivarcurve, normstack, indices, axis=0,
                                    especeval=self.especeval,
                                    variancearray=self.variancearray)

        if self.optimizeType == 'enhanced':
            logger.info('Enhanced Optimization')
        else:
            logger.info('Normal Optimization')

        for i in range(nseg):
            # optimize
            varlist = self.varlist[i]
            deriv = varlist[1:] - varlist[:-1]
            deriv2 = deriv[1:] - deriv[:-1]
            noptpix = varlist.size

            if self.optimizeType != 'enhanced':
                # statistics on the derivatives
                ind = int(.5 * (noptpix - 2))
                mn1 = deriv[ind:].mean()
                std1 = deriv[ind:].std() * 2
                mn2 = deriv2[ind:].mean()
                std2 = deriv2[ind:].std() * 2
                # look for crossing points. When they get within 1 sigma of
                # mean in settled region.
                # pad by 1 for 1st deriv
                cross1 = np.append([False], deriv >= (mn1 - std1))
                # pad by 2 for 2nd
                cross2 = np.append([False, False],
                                   np.abs(deriv2) <= (mn2 + std2))
                cross = np.logical_or(cross1, cross2)
            else:
                # statistics on the derivatives
                ind = int(.75 * (noptpix - 2))
                mn1 = deriv[ind:].mean()
                std1 = deriv[ind:].std()
                mn2 = deriv2[ind:].mean()
                std2 = deriv2[ind:].std()
                # pad by 1 for 1st deriv
                cross = np.append([False], deriv >= (mn1 - std1))

            self.nevals[i] = np.where(cross)[0][0]

    # #########################################################################
    # #################################### Extra Functions ####################
    # #########################################################################

    def make_contcube(self):
        """ Remold the continuum array so it can be investigated.

        Takes the continuum stack and returns it into a familiar cube form.
        """
        contcube = self.cube.copy() * np.nan
        contcube[:, self.y, self.x] = self.contarray
        return contcube

    def _externalSVD(self, extSVD):
        logger.info('Calculating eigenvalues for input eigenspectra')
        hdu = fits.open(extSVD)
        nseg = len(self.pranges)

        # normalize the variance in the segments
        self.variancearray = np.zeros((nseg, self.stack.shape[1]))

        for i in range(nseg):
            pmin, pmax = self.pranges[i]
            logger.debug('pmin=%.0f, pmax=%.0f',pmin,pmax)
            self.variancearray[i, :] = np.var(self.normstack[pmin:pmax, :],
                                              axis=0)
            self.normstack[pmin:pmax, :] /= self.variancearray[i, :]

        especeval = []
        for i in range(nseg):
            eigenspectra = hdu[i + 1].data
            ns = self.normstack[self.pranges[i][0]:self.pranges[i][1]]
            evals = np.transpose(np.transpose(ns).dot(eigenspectra))
            especeval.append([eigenspectra, evals])

        self.especeval = especeval
        hdu.close()

    def _applymask(self, mask, maskfits=True):
        """Apply a mask to the input data to provide a cleaner basis set.

        mask is >= 1 for objects and 0 for sky, so that a SExtractor
        segmentation map can be used directly. The file is read with
        ``astropy.io.fits.getdata``, which first tries the primary extension
        and falls back to the first extension if no data is found there.

        If maskfits is False, the input mask can be a 2D array rather than a
        FITS file.

        """
        logger.info('Applying Mask for SVD Calculation from %s', mask)
        if maskfits:
            self.maskfile = mask
            mask = fits.getdata(mask).astype(bool)
        nmasked = np.count_nonzero(mask)
        logger.info('Masking %d pixels (%d%%)', nmasked,
                    nmasked / np.prod(mask.shape) * 100)
        self.cube[:, mask] = np.nan

    ###########################################################################
    ##################################### Output Functions ####################
    ###########################################################################

    def writecube(self, outcubefits='DATACUBE_ZAP.fits', clobber=False):
        """Write the processed datacube to an individual fits file."""

        check_file_exists(outcubefits,clobber=clobber)
        # fix up for writing
        outhead = _newheader(self)

        # create hdu and write
        outhdu = fits.PrimaryHDU(data=self.cleancube, header=outhead)
        outhdu.writeto(outcubefits,clobber=clobber)
        logger.info('Cube file saved to %s', outcubefits)

    def writeskycube(self, skycubefits='SKYCUBE_ZAP.fits', clobber=False):
        """Write the processed datacube to an individual fits file."""

        check_file_exists(skycubefits,clobber=clobber)
        # fix up for writing
        outcube = self.cube - self.cleancube
        outhead = _newheader(self)

        # create hdu and write
        outhdu = fits.PrimaryHDU(data=outcube, header=outhead)
        outhdu.writeto(skycubefits,clobber=clobber)
        logger.info('Sky cube file saved to %s', skycubefits)

    def mergefits(self, outcubefits, clobber=False):
        """Merge the ZAP cube into the full muse datacube and write."""

        # make sure it has the right extension
        outcubefits = outcubefits.split('.fits')[0] + '.fits'
        check_file_exists(outcubefits,clobber=clobber)
        hdu = fits.open(self.cubefits)
        hdu[1].header = _newheader(self)
        hdu[1].data = self.cleancube
        hdu.writeto(outcubefits,clobber=clobber)
        hdu.close()
        logger.info('Cube file saved to %s', outcubefits)

    def writeSVD(self, svdoutputfits='ZAP_SVD.fits', clobber=False):
        """Write the SVD to an individual fits file."""

        check_file_exists(svdoutputfits,clobber=clobber)
        header = fits.Header()
        header['ZAPvers'] = (__version__, 'ZAP version')
        header['ZAPzlvl'] = (self.run_zlevel, 'ZAP zero level correction')
        header['ZAPclean'] = (self.run_clean,
                              'ZAP NaN cleaning performed for calculation')
        header['ZAPcftyp'] = (self._cftype, 'ZAP continuum filter type')
        header['ZAPcfwid'] = (self._cfwidth, 'ZAP continuum filter size')
        header['ZAPmask'] = (self.maskfile, 'ZAP mask used to remove sources')
        nseg = len(self.pranges)
        header['ZAPnseg'] = (nseg, 'Number of segments used for ZAP SVD')

        hdu = fits.HDUList([fits.PrimaryHDU(self.zlsky)])
        for i in range(len(self.pranges)):
            hdu.append(fits.ImageHDU(self.especeval[i][0]))
        # write for later use
        hdu.writeto(svdoutputfits,clobber=clobber)
        logger.info('SVD file saved to %s', svdoutputfits)

    def plotvarcurve(self, i=0, ax=None):
        if self.varlist is None or len(self.varlist) == 0:
            logger.warning('No varlist found. The optimize method must be '
                           'run first.')
            return

        # optimize
        deriv = (np.roll(self.varlist[i], -1) - self.varlist[i])[:-1]
        deriv2 = (np.roll(deriv, -1) - deriv)[:-1]
        noptpix = self.varlist[i].size

        if self.optimizeType == 'normal':
            # statistics on the derivatives (integer slice index, as in optimize)
            ind = int(.5 * (noptpix - 2))
            mn1 = deriv[ind:].mean()
            std1 = deriv[ind:].std() * 2
            mn2 = deriv2[ind:].mean()
            std2 = deriv2[ind:].std() * 2
        else:
            # statistics on the derivatives (integer slice index, as in optimize)
            ind = int(.75 * (noptpix - 2))
            mn1 = deriv[ind:].mean()
            std1 = deriv[ind:].std()
            mn2 = deriv2[ind:].mean()
            std2 = deriv2[ind:].std()

        if ax is None:
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(3, 1, figsize=[10, 15])

        ax1, ax2, ax3 = ax
        ax1.plot(self.varlist[i], linewidth=3)
        ax1.plot([self.nevals[i], self.nevals[i]],
                 [min(self.varlist[i]), max(self.varlist[i])])
        ax1.set_ylabel('Variance')

        ax2.plot(np.arange(deriv.size), deriv)
        ax2.plot([0, len(deriv)], [mn1, mn1], 'k')
        ax2.plot([0, len(deriv)], [mn1 - std1, mn1 - std1], '0.5')
        ax2.plot([self.nevals[i] - 1, self.nevals[i] - 1],
                 [min(deriv), max(deriv)])
        ax2.set_ylabel('d/dn Var')

        ax3.plot(np.arange(deriv2.size), np.abs(deriv2))
        ax3.plot([0, len(deriv2)], [mn2, mn2], 'k')
        ax3.plot([0, len(deriv2)], [mn2 + std2, mn2 + std2], '0.5')
        ax3.plot([self.nevals[i] - 2, self.nevals[i] - 2],
                 [min(deriv2), max(deriv2)])
        ax3.set_ylabel('(d^2/dn^2) Var')
        # ax3.set_xlabel('Number of Components')

        ax1.set_title('Segment {0}, {1} - {2} Angstroms'.format(
            i, self.lranges[i][0], self.lranges[i][1]))
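For orientation, a minimal usage sketch of zclass; the file names and parameter values are illustrative, and in practice these steps are driven by ZAP's top-level entry point rather than called directly:

z = zclass(cubefits='DATACUBE_IN.fits')  # hypothetical input cube
z._run(clean=True, zlevel='median', cftype='weight', cfwidth=100)
z.writecube(outcubefits='DATACUBE_ZAP.fits', clobber=True)
z.writeSVD(svdoutputfits='ZAP_SVD.fits')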
コード例 #55
0
ファイル: metaData.py プロジェクト: rashley2712/IPHAS
		print("Looking at the files in %s."%(f[0]))
		fitsFiles = []
		for file in f[2]:
			if "fits.fz" in file:
				sys.stdout.write("\r%s    "%file)
				sys.stdout.flush()
		
				newfitsObject = fitsObject()
				fitsFiles.append(file)
				hdulist = fits.open(joinPaths(f[0], file))
				newfitsObject.filter = hdulist[1].header['WFFBAND']
				newfitsObject.CCD = hdulist[1].header['DASCHAN']
				wcsSolution = WCS(hdulist[1].header)
				(height, width) = numpy.shape(hdulist[1].data)
				imageCentre = [ width/2, height/2]
				ra, dec = wcsSolution.all_pix2world([imageCentre], 1)[0]
				newfitsObject.setCentre(ra, dec)
				newfitsObject.path = f[0]
				newfitsObject.filename = file
				cacheFilename = file[:9] + "_dr2_cache.fits"
				if os.path.exists(joinPaths(f[0], cacheFilename)):
					sys.stdout.write(" found cached dr2 data in:" + str(cacheFilename))
					sys.stdout.flush()
					newfitsObject.cached = True
				else:
					newfitsObject.cached = False
					
				hdulist.close()
				fitsObjects.append(newfitsObject)
				outCSV.write("%s, %s, %s, %f, %f, %s\n"%(newfitsObject.filename, newfitsObject.CCD, newfitsObject.filter, newfitsObject.ra, newfitsObject.dec, newfitsObject.cached))
				outCSV.flush()
コード例 #56
0
    
    # Read in the image and find stars in the image
    Ifile = (stokesDir + delim +
             '_'.join([thisTarget, thisWaveband, 'I']) + '.fits')
    stokesI   = Image(Ifile)
    mean, median, std = sigma_clipped_stats(stokesI.arr, sigma=3.0, iters=5)
    threshold = median + 3.0*std
    fwhm    = 3.0
    sources = daofind(stokesI.arr, threshold, fwhm, ratio=1.0, theta=0.0,
                      sigma_radius=1.5, sharplo=0.2, sharphi=1.0,
                      roundlo=-1.0, roundhi=1.0, sky=0.0,
                      exclude_border=True)
    
    # Convert source positions to RA and Dec
    wcs      = WCS(stokesI.header)
    ADstars  = wcs.all_pix2world(sources['xcentroid'], sources['ycentroid'], 0)
    catalog2 = SkyCoord(ra = ADstars[0]*u.deg, dec = ADstars[1]*u.deg, frame = 'fk5')
    
    
    ###
    ### This slow, meat-axe method was useful for verification.
    ### It produces the same results as the method below.
    ###
#    # Loop through each of the detected sources, and check for possible confusion
#    keepStars    = []
#    numCat1Match = []
#    numCat2Match = []
#    for i in range(len(catalog2)):
#        # Establish the coordinates of the current star
#        thisCoord = SkyCoord(ra = catalog2[i].ra, dec = catalog2[i].dec)
#        
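The comparison "method below" is cut off in this excerpt; the usual vectorized route is astropy's catalog matching. A sketch, where catalog1 and the 2-arcsec tolerance are assumptions:

    idx, d2d, d3d = catalog2.match_to_catalog_sky(catalog1)
    matched = d2d < 2.0 * u.arcsec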
コード例 #57
0
import aplpy
import pyregion
from astropy.wcs import WCS
from astropy.io import fits
from astropy.table import Table
import numpy as np

cat = np.load('coords_3.npy')
fig = aplpy.FITSFigure('../../vcc.image/1545.g.fits')
fig.show_grayscale(vmin=0,vmid=-0.3,invert='y',stretch='log')
#fig.show_regions('vcc1545_backup.reg')
img = fits.open('../../vcc.image/1545.g.fits')

w = WCS('../../vcc.image/1545.g.fits')
x_max = img[0].header['NAXIS1']
y_max = img[0].header['NAXIS2']
ra_center,dec_center = w.all_pix2world(x_max/2+0.5,y_max/2+0.5,0)# Pixel to WCS
print(ra_center, dec_center)
fig.show_circles(round(ra_center,5),round(dec_center,5),22.491/3600)
for item in cat:
	if item[2]==1:
		fig.show_circles(item[0],item[1],2./3600,color='red',linewidth=2)
	if item[2]==2:
		fig.show_circles(item[0],item[1],2./3600,color='yellow')
	if item[2]==3:
		fig.show_circles(item[0],item[1],2./3600,color='blue',linewidth=2)
fig.save('aa.png')
コード例 #58
0
ファイル: utils.py プロジェクト: rodluger/everest
def GetHiResImage(ID):
    '''
    Queries the Palomar Observatory Sky Survey II catalog to
    obtain a higher resolution optical image of the star with EPIC number
    :py:obj:`ID`.

    '''

    # Get the TPF info
    client = kplr.API()
    star = client.k2_star(ID)
    k2ra = star.k2_ra
    k2dec = star.k2_dec
    tpf = star.get_target_pixel_files()[0]
    with tpf.open() as f:
        k2wcs = WCS(f[2].header)
        shape = np.array(f[1].data.field('FLUX'), dtype='float64')[0].shape

    # Get the POSS URL
    hou = int(k2ra * 24 / 360.)
    min = int(60 * (k2ra * 24 / 360. - hou))
    sec = 60 * (60 * (k2ra * 24 / 360. - hou) - min)
    ra = '%02d+%02d+%.2f' % (hou, min, sec)
    sgn = '' if np.sign(k2dec) >= 0 else '-'
    deg = int(np.abs(k2dec))
    min = int(60 * (np.abs(k2dec) - deg))
    sec = 3600 * (np.abs(k2dec) - deg - min / 60)
    dec = '%s%02d+%02d+%.1f' % (sgn, deg, min, sec)
    url = 'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&' + \
          'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3=' % (ra, dec)
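    # A sketch of a more robust alternative for the sexagesimal formatting,
    # delegating to astropy (untested here; note also that `min` above
    # shadows the Python builtin):
    #   from astropy.coordinates import SkyCoord
    #   import astropy.units as u
    #   c = SkyCoord(ra=k2ra * u.deg, dec=k2dec * u.deg)
    #   ra = c.ra.to_string(unit=u.hour, sep='+', precision=2, pad=True)
    #   dec = c.dec.to_string(sep='+', precision=1, pad=True)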

    # Query the server
    r = urllib.request.Request(url)
    handler = urllib.request.urlopen(r)
    code = handler.getcode()
    if int(code) != 200:
        # Unavailable
        return None
    data = handler.read()

    # Atomically write to a temp file
    f = NamedTemporaryFile("wb", delete=False)
    f.write(data)
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # Now open the POSS fits file
    with pyfits.open(f.name) as ff:
        img = ff[0].data

    # Map POSS pixels onto K2 pixels
    xy = np.empty((img.shape[0] * img.shape[1], 2))
    z = np.empty(img.shape[0] * img.shape[1])
    pwcs = WCS(f.name)
    k = 0
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            ra, dec = pwcs.all_pix2world(float(j), float(i), 0)
            xy[k] = k2wcs.all_world2pix(ra, dec, 0)
            z[k] = img[i, j]
            k += 1

    # Resample
    grid_x, grid_y = np.mgrid[-0.5:shape[1] - 0.5:0.1, -0.5:shape[0] - 0.5:0.1]
    resampled = griddata(xy, z, (grid_x, grid_y), method='cubic')

    # Rotate to align with K2 image. Not sure why, but it is necessary
    resampled = np.rot90(resampled)

    return resampled
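The nested pixel loop above can be collapsed into whole-array WCS calls, which is dramatically faster for large POSS images. A sketch of an equivalent helper, assuming the same conventions as GetHiResImage (hypothetical function, not part of everest):

def map_poss_to_k2(img, pwcs, k2wcs):
    '''Vectorized equivalent of the pixel loop in GetHiResImage (a sketch).'''
    # Build column/row index grids covering every POSS pixel
    jj, ii = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
    # POSS pixels -> sky -> K2 pixels, in two array-valued calls
    ra, dec = pwcs.all_pix2world(jj.ravel().astype(float),
                                 ii.ravel().astype(float), 0)
    x, y = k2wcs.all_world2pix(ra, dec, 0)
    xy = np.column_stack([x, y])
    z = img.ravel().astype(float)
    return xy, z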