Code example #1
def plotspFluxcalibRatios(plate,mjd,fiberlist=[],verbose=False):
	# Look up exposure ids
	if verbose:
		print 'Looking up exposureIDs for %s-%s...' % (plate,mjd)
	exposureIDs = getExposureIDs(plate,mjd,camera='b')
	# Iterate over exposure ids
	if verbose:
		print 'Found %d frames...' % len(exposureIDs)
	for exposureID in exposureIDs:
		bossFluxcalibname = os.path.join(BSR,redVersion,plate,'spFluxcalib-%s.fits.gz'%exposureID)
		blueFluxcalibname = os.path.join(BSR,blueVersion,plate,'spFluxcalib-%s.fits.gz'%exposureID)
		# Read boss standard calibrated spFluxcalib
		bosshdu = pf.open(bossFluxcalibname)
		npix = bosshdu[0].header['naxis1']
		wave = np.arange(npix)
		bossflux = bosshdu[0].data
		bossheader = bosshdu[0].header
		bosshdu.close()
		# Read blue standard calibrated spFluxcalib
		bluehdu = pf.open(blueFluxcalibname)
		bluenpix = bluehdu[0].header['naxis1']
		blueflux = bluehdu[0].data
		bluehdu.close()
		if bluenpix != npix:
			print '!! warning bossnpix != bluenpix: %d != %d' % (npix,bluenpix)
			print '!! skipping %s-%s...' % (plate,mjd)
			return -1
		# Plot ratio
		savedir = os.path.join(BSR,blueVersion,plate)
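The example is truncated by the source page just after savedir is built. A hypothetical sketch of how the loop body might continue, purely illustrative (the plt alias and the output naming are assumptions, not the original code):

		# hypothetical continuation, not in the original source
		ratio = bossflux / blueflux
		plt.figure()
		plt.plot(wave, ratio.T)  # one curve per fiber if the arrays are 2D
		plt.xlabel('pixel')
		plt.ylabel('boss/blue spFluxcalib ratio')
		plt.title('%s-%s %s' % (plate, mjd, exposureID))
		plt.savefig(os.path.join(savedir, 'spFluxcalibRatio-%s.png' % exposureID))
		plt.close()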
Code example #2
File: gen_utils.py  Project: eirikgje/misc_python
def calc_alm_chisq_fromfits(almfile, clfile):
    alm = pyfits.open(almfile)[0].data
    cls = pyfits.open(clfile)[0].data
    numiter = alm.shape[0]
    numchain = alm.shape[1]
    alm = np.reshape(alm, (numiter*numchain,alm.shape[2], alm.shape[3], alm.shape[4], alm.shape[5]))
    alm = alm[:, :, :, 2:, :]
    cls = cls[1:]
    cls = np.reshape(cls, (numiter * numchain, cls.shape[2], cls.shape[3]))
    if alm.shape[1] == 3:
        cls = np.concatenate((cls[:, 0:1, :], cls[:, 3:4, :], cls[:, 5:6, :]), 1)
    elif alm.shape[1] == 1:
        cls = cls[:, 0:1, :]
    cls = cls[:, :, 2:]
    cls = np.transpose(cls).copy()
    alm = np.transpose(alm).copy()
    chisq = np.zeros(cls.shape)
    for i in range(cls.shape[0]):
        l = i + 2
        for m in range(l):
            if m == 0:
                chisq[i, :, :] += alm[0, i, m, :, :] ** 2
            else:
                chisq[i, :, :] += np.sum(2 * alm[:, i, m, :, :] ** 2, 0)
        chisq[i, :, :] = chisq[i, :, :] / cls[i, :, :] / (2 * l + 1) * (l * (l + 1)) / (2 * np.pi)
    return chisq
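In math form, the statistic accumulated above is (note that range(l) runs m = 0 .. l-1, so the m = l term is apparently never added):

\[
\chi^2_\ell \;=\; \frac{\ell(\ell+1)}{2\pi}\,
\frac{a_{\ell 0}^2 + 2\sum_{m=1}^{\ell-1}\lvert a_{\ell m}\rvert^2}{(2\ell+1)\,C_\ell}
\]

where the factor of 2 accounts for the real and imaginary parts of each m > 0 coefficient (the sum over axis 0 in the code).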
Code example #3
    def combine_seg_map(self, filt, out_dir):
        """Combines bright and faint segmentation maps. Regions belonging to
        bright objects are expanded by 5 pixels"""
        cat_name = out_dir + '/' + filt + '_clean.cat'
        bright_name = out_dir + '/' + filt + '_bright_seg_map.fits'
        faint_name = out_dir + '/' + filt + '_faint_seg_map.fits'
        hdu1 = pyfits.open(bright_name)
        hdu2 = pyfits.open(faint_name)
        br = hdu1[0].data
        ft = hdu2[0].data
        hdu2.close()
        hdu1.close()
        cat = Table.read(cat_name, format='ascii.basic')
        new_seg = br
        # Expand bright regions by 5 pixels
        q, = np.where(cat['IS_BRIGHT'] == 1)
        for i in q:
            new_seg = fn.seg_expand(new_seg, buff=5, val=int(i) + 1, set_to=int(i) + 1)
            # +1 to account for renumbering
        q, = np.where(cat['IS_BRIGHT'] == 0)
        s = ft.shape
        for i in q:
            for j in range(s[0]):
                pix, = np.where((ft[j, :] == cat['OLD_NUMBER'][i]) & (new_seg[j, :] == 0))
                new_seg[j][pix] = cat['NUMBER'][i] + 1
        new_seg_name = out_dir + '/' + filt + '_comb_seg_map.fits'
        print "Bright faint combined seg map created at", new_seg_name
        pyfits.writeto(new_seg_name, new_seg, clobber=True)
        os.remove(bright_name)
        os.remove(faint_name)
Code example #4
def loadFiles(
    imName = None,
    maskName = None,
    satMaskName = None,
    invertMask = False,
):
    """Load a new image and/or mask and/or satMask from a fits file.

    Inputs:
    - imName: path to image FITS file; None to use current image
    - maskName: path to bad pixel mask; 0=good unless invertMask is true;
            None to use current mask, if any
    - satMaskName: path to saturated pixel mask; 0=good regardless of invertMask;
            None to use current mask, if any
    """
    global im, imFits, mask, maskFits, satMask, satMaskFits, isSat, sd
    if imName:
        imFits = pyfits.open(imName)
        print("Loading image %s into imFits and im" % (imName,))
        dataSec = parseDataSec(imFits[0].header.get("DATASEC"))
        dataShape = imFits[0].data.shape
        if dataSec is None:
            dataSec = [0, dataShape[0], 0, dataShape[1]]
        im = imFits[0].data[dataSec[0]:dataSec[1], dataSec[2]:dataSec[3]]
    if maskName:
        print("Loading bad pixel mask %s into maskFits and mask" % (maskName,))
        maskFits = pyfits.open(maskName)
        mask = maskFits[0].data[dataSec[0]:dataSec[1], dataSec[2]:dataSec[3]] > 0.1
    if satMaskName:
        print("Loading saturated pixel mask %s into satMaskFits and satMask" % (satMaskName,))
        satMaskFits = pyfits.open(satMaskName)
        satMask = satMaskFits[0].data[dataSec[0]:dataSec[1], dataSec[2]:dataSec[3]] > 0.1
    return im, mask, satMask
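A minimal usage sketch (file names hypothetical). Two quirks of the code as written: dataSec is only computed in the imName branch, so a mask can only be loaded together with (or after) an image, and invertMask is accepted but never actually applied:

    im, mask, satMask = loadFiles(imName='exp0001.fits',
                                  maskName='badpix.fits',
                                  satMaskName='satpix.fits')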
Code example #5
File: Flats.py  Project: themiyan/MosfireDRP_Themiyan
def combine_off_on(maskname, band, options, lampsOff=False):
    '''
    combine list of flats into a flat file'''


    file_off = os.path.join("combflat_lamps_off_2d_%s.fits" 
                    % (band))

    file_on = os.path.join("combflat_2d_%s.fits" 
                    % (band))

    file_on_save = os.path.join("combflat_lamps_on_2d_%s.fits" 
                    % (band))

    hdu_off  = pyfits.open(file_off)
    hdu_on   = pyfits.open(file_on)

    #save lamps On data set to new name
    hdu_on.writeto(file_on_save, clobber=True)

    hdu_on[0].data = hdu_on[0].data - hdu_off[0].data

    #Add comment that the difference was completed
    hdu_on[0].header.add_history("Differenced the Lamps on and Lamps off images ")
    #save the differenced (lamps on - lamps off) data back to the original name
    hdu_on.writeto(file_on, clobber=True)
Code example #6
File: sourceSelection.py  Project: ATNF/askapsdp
    def __init__(self, parset):

        self.method = parset.get_value('sourceSelection','none')
        self.setFluxType("peak")
        if self.method == 'threshold':
            self.threshImageName = parset.get_value('thresholdImage','detectionThreshold.i.clean.fits')
            if os.path.exists(self.threshImageName):
                threshim=pyfits.open(self.threshImageName)
                self.threshmap = threshim[0].data
                threshHeader = threshim[0].header
                self.threshWCS = pywcs.WCS(threshHeader)
                threshim.close()
                print "Using threshold map %s to determine source inclusion"%self.threshImageName
            else:
                print "Threshold image %s not available. Switching sourceSelection to 'none'."%self.threshImageName
                self.method='none'
        elif self.method == 'weights':
            self.weightsImageName = parset.get_value('weightsImage','weights.i.clean.fits')
            if os.path.exists(self.weightsImageName):
                weightsim = pyfits.open(self.weightsImageName)
                self.weightsmap = weightsim[0].data
                weightsHeader = weightsim[0].header
                self.weightsWCS = pywcs.WCS(weightsHeader)
                self.weightCutoff = parset.get_value('weightsCutoff',0.1)
                print "Using weights image %s with relative cutoff %f (=%f) to determine source inclusion"%(self.weightsImageName,self.weightCutoff,self.weightCutoff*self.weightsmap.max())
                self.weightCutoff = self.weightCutoff * self.weightsmap.max()
            else:
                print "Weights image %s not available. Switching sourceSelection to 'none'."%self.weightsImageName
                self.method='none'
Code example #7
File: NGC_paper_plots.py  Project: eigenbrot/snakes
def plot_psi_weights(output,
                     modelfile='/d/monk/eigenbrot/WIYN/14B-0456/anal/models/allZ2_vardisp/allz2_vardisp_batch_interp.fits'):
    #Like the last page of all the fit plots, but for all pointings at once
    #cribbed from plot_bc_vardisp.py

    m = pyfits.open(modelfile)[1].data[0]
    numZ = np.unique(m['Z'][:,0]).size
    numAge = np.unique(m['AGE'][:,0]).size
    big_W = np.zeros((numZ,numAge))
    
    for p in range(6):
        coeffile = 'NGC_891_P{}_bin30_allz2.coef.fits'.format(p+1)
        print coeffile
        coef_arr = pyfits.open(coeffile)[1].data
        numap = coef_arr['VSYS'].size
        
        for i in range(numap):
            wdata = coef_arr[i]['LIGHT_FRAC'].reshape(numZ,numAge)
            big_W += wdata/np.max(wdata)

    bwax = plt.figure().add_subplot(111)
    bwax.imshow(big_W,origin='lower',cmap='Blues',interpolation='none')
    bwax.set_xlabel('SSP Age [Gyr]')
    bwax.set_xticks(range(numAge))
    bwax.set_xticklabels(m['AGE'][:numAge,0]/1e9)
    bwax.set_ylabel(r'$Z/Z_{\odot}$')
    bwax.set_yticks(range(numZ))
    bwax.set_yticklabels(m['Z'][::numAge,0])

    pp = PDF(output)
    pp.savefig(bwax.figure)
    pp.close()
    plt.close(bwax.figure)
    
    return
Code example #8
def add_real_back(main_path, nm_stm, gal_im, ICL_im, useICL,back_im,
                  rowc, colc, petrorad_pix):
    sigma = 1.0
    
    back_image = pf.open(main_path+back_im)
    back_head = back_image[0].header 
    back_data = back_image[0].data
    back_image.close()
            
    data_image = pf.open(main_path+gal_im)
    data = data_image[0].data
    data = ndimage.filters.gaussian_filter(data,sigma,mode='constant',cval=0.0)
    header = data_image[0].header
    data_image.close()

    new_dat= insert_im(data,back_data,rowc,colc)
    new_head = combine_head(header, back_head,rowc,colc, icl=False)
    if useICL:
        ICL_image = pf.open(main_path+ICL_im)
        ICL = ICL_image[0].data
        ICL=ndimage.filters.gaussian_filter(ICL,sigma,mode='constant',cval=0.0)
        ICLheader = ICL_image[0].header
        ICL_image.close()
    
        new_dat = insert_im(ICL,new_dat,rowc,colc)
        new_head = combine_head(ICLheader, new_head, rowc,colc,icl=True)

    new_dat = cut_im(new_dat, rowc, colc, petrorad_pix)
    new_head.update("PSF", sigma, "gaussian sigma used in convolution")
    
    ext = pf.PrimaryHDU(new_dat, new_head)
    ext.writeto(main_path+nm_stm+"chipflat.fits", clobber = 1)
            
    return new_dat, new_head
Code example #9
File: grad_j.py  Project: jpinedaf/velocity_tools
def fitstoarrays(ffile,fmask):

  fitsfile = pyfits.open(ffile)
  data = fitsfile[0].data

  header = pyfits.getheader(ffile)
  naxis1 = header['naxis1']
  naxis2 = header['naxis2']
  cdelt1 = header['cdelt1']
  cdelt2 = header['cdelt2']
  crpix1 = header['crpix1']
  crpix2 = header['crpix2']
  crval1 = header['crval1']
  crval2 = header['crval2']

  X = zeros(data.shape)
  Y = zeros(data.shape)

  for j in range(data.shape[0]):
    for i in range(data.shape[1]):
      X[j,i] = (1+i)*cdelt1
      Y[j,i] = (1+j)*cdelt2

  maskfile = pyfits.open(fmask)
  datam = maskfile[0].data

  mask = datam!=0
  #Z = (X**2+Y**2)

  return X[mask],Y[mask],data[mask]
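The double loop only builds offset grids from the pixel spacings (crpix, crval, and the naxis values are read but never applied). A vectorized sketch, assuming numpy is available as np:

  jj, ii = np.indices(data.shape)
  X = (ii + 1) * cdelt1
  Y = (jj + 1) * cdelt2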
Code example #10
def get_model_spec_martell():
    # Carbon and nitrogen theoretical gradient spectra
    DATA_DIR = "/Users/annaho/Data/Martell"
    inputf = "ssg_wv.fits"
    a = pyfits.open(DATA_DIR + "/" + inputf)
    wl = a[1].data
    a.close()

    inputf = "ssg_nowv.fits"
    a = pyfits.open(DATA_DIR + "/" + inputf)
    dat = a[1].data
    a.close()

    ind = np.where(np.logical_and(dat['Nfe']==0.6, dat['FeH']==-1.41))[0]
    cfe = dat['cfe'][ind]
    # only step from -0.4 to 0.4
    #dflux = cannon_normalize(dat[ind[-1]][3])-cannon_normalize(dat[ind[0]][3])
    dflux = cannon_normalize(dat[ind[9]][3])-cannon_normalize(dat[ind[5]][3])
    #dcfe = cfe[1]-cfe[0]
    dcfe = cfe[9]-cfe[5]
    c_grad_spec = (dflux/dcfe)

    ind = np.where(np.logical_and(dat['cfe']==-0.4, dat['FeH']==-1.41))[0]
    nfe = dat['nfe'][ind]
    # only step from -0.4 to 0.4
    dflux = cannon_normalize(dat[ind[5]][3])-cannon_normalize(dat[ind[1]][3])
    dnfe = nfe[5]-nfe[1]
    n_grad_spec = (dflux/dnfe)
    
    return wl, c_grad_spec, n_grad_spec
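Each gradient spectrum is a finite difference of normalized model fluxes; per the in-code comments, the step runs from the abundance value -0.4 to +0.4:

\[
\frac{\partial f}{\partial[\mathrm{C/Fe}]} \approx
\frac{f_{[\mathrm{C/Fe}]=+0.4}-f_{[\mathrm{C/Fe}]=-0.4}}{\Delta[\mathrm{C/Fe}]}
\]

and analogously for [N/Fe].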
Code example #11
File: test_groups.py  Project: embray/PyFITS
    def test_parnames_round_trip(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/130

        Ensures that opening a random groups file in update mode or writing it
        to a new file does not cause any change to the parameter names.
        """

        # Because this test tries to update the random_groups.fits file, let's
        # make a copy of it first (so that the file doesn't actually get
        # modified on the off chance that the test fails)
        self.copy_file('random_groups.fits')

        parameters = ['UU', 'VV', 'WW', 'BASELINE', 'DATE']
        with fits.open(self.temp('random_groups.fits'), mode='update') as h:
            assert h[0].parnames == parameters
            h.flush()
        # Open again just in read-only mode to ensure the parnames didn't
        # change
        with fits.open(self.temp('random_groups.fits')) as h:
            assert h[0].parnames == parameters
            h.writeto(self.temp('test.fits'))

        with fits.open(self.temp('test.fits')) as h:
            assert h[0].parnames == parameters
Code example #12
File: test_hdulist.py  Project: Doomchinchilla/PyFITS
    def test_update_resized_header2(self):
        """
        Regression test for https://trac.assembla.com/pyfits/ticket/150

        This is similar to test_update_resized_header, but specifically tests a
        case of multiple consecutive flush() calls on the same HDUList object,
        where each flush() requires a resize.
        """

        data1 = np.arange(100)
        data2 = np.arange(100) + 100
        phdu = fits.PrimaryHDU(data=data1)
        hdu = fits.ImageHDU(data=data2)

        phdu.writeto(self.temp('temp.fits'))

        with fits.open(self.temp('temp.fits'), mode='append') as hdul:
            hdul.append(hdu)

        with fits.open(self.temp('temp.fits'), mode='update') as hdul:
            idx = 1
            while len(str(hdul[0].header)) <= 2880 * 2:
                hdul[0].header['TEST%d' % idx] = idx
                idx += 1
            hdul.flush()
            hdul.append(hdu)

        with fits.open(self.temp('temp.fits')) as hdul:
            assert (hdul[0].data == data1).all()
            assert hdul[1].header == hdu.header
            assert (hdul[1].data == data2).all()
            assert (hdul[2].data == data2).all()
Code example #13
File: test_hdulist.py  Project: Doomchinchilla/PyFITS
    def test_save_backup(self):
        """Test for https://trac.assembla.com/pyfits/ticket/121

        Save backup of file before flushing changes.
        """

        self.copy_file('scale.fits')

        with ignore_warnings():
            with fits.open(self.temp('scale.fits'), mode='update',
                           save_backup=True) as hdul:
                # Make some changes to the original file to force its header
                # and data to be rewritten
                hdul[0].header['TEST'] = 'TEST'
                hdul[0].data[0] = 0

        assert os.path.exists(self.temp('scale.fits.bak'))
        with fits.open(self.data('scale.fits'),
                       do_not_scale_image_data=True) as hdul1:
            with fits.open(self.temp('scale.fits.bak'),
                           do_not_scale_image_data=True) as hdul2:
                assert hdul1[0].header == hdul2[0].header
                assert (hdul1[0].data == hdul2[0].data).all()

        with ignore_warnings():
            with fits.open(self.temp('scale.fits'), mode='update',
                           save_backup=True) as hdul:
                # One more time to see if multiple backups are made
                hdul[0].header['TEST2'] = 'TEST'
                hdul[0].data[0] = 1

        assert os.path.exists(self.temp('scale.fits.bak'))
        assert os.path.exists(self.temp('scale.fits.bak.1'))
Code example #14
File: pixelize.py  Project: balbinot/ugali
def pixelizeCatalog(infiles, config, force=False):
    """
    Break catalog up into a set of healpix files.
    """
    nside_catalog = config['coords']['nside_catalog']
    nside_pixel = config['coords']['nside_pixel']
    outdir = mkdir(config['catalog']['dirname'])
    filenames = config.getFilenames()
    
    for ii,infile in enumerate(infiles):
        logger.info('(%i/%i) %s'%(ii+1, len(infiles), infile))
        f = pyfits.open(infile)
        data = f[1].data
        header = f[1].header
        logger.info("%i objects found"%len(data))
        if not len(data): continue
        glon,glat = cel2gal(data['RA'],data['DEC'])
        catalog_pix = ang2pix(nside_catalog,glon,glat,coord='GAL')
        pixel_pix = ang2pix(nside_pixel,glon,glat,coord='GAL')
        names = [n.upper() for n in data.columns.names]
        ra_idx = names.index('RA'); dec_idx = names.index('DEC')
        idx = ra_idx if ra_idx > dec_idx else dec_idx
        catalog_pix_name = 'PIX%i'%nside_catalog
        pixel_pix_name = 'PIX%i'%nside_pixel

        coldefs = pyfits.ColDefs(
            [pyfits.Column(name='GLON',format='1D',array=glon),
             pyfits.Column(name='GLAT',format='1D',array=glat),
             pyfits.Column(name=catalog_pix_name,format='1J',array=catalog_pix),
             pyfits.Column(name=pixel_pix_name  ,format='1J',array=pixel_pix)]
        )
        hdu = pyfits.new_table(data.columns[:idx+1]+coldefs+data.columns[idx+1:])
        table = hdu.data

        for pix in numpy.unique(catalog_pix):
            logger.debug("Processing pixel %s"%pix)
            outfile = filenames.data['catalog'][pix]
            if not os.path.exists(outfile):
                logger.debug("Creating %s"%outfile)
                names = [n.upper() for n in table.columns.names]
                formats = table.columns.formats
                columns = [pyfits.Column(n,f) for n,f in zip(names,formats)]
                out = pyfits.HDUList([pyfits.PrimaryHDU(),pyfits.new_table(columns)])
                out[1].header['NSIDE'] = nside_catalog
                out[1].header['PIX'] = pix
                out.writeto(outfile)
            hdulist = pyfits.open(outfile,mode='update')
            t1 = hdulist[1].data
            # Could we speed up with sorting and indexing?
            t2 = table[ table[catalog_pix_name] == pix ]
            nrows1 = t1.shape[0]
            nrows2 = t2.shape[0]
            nrows = nrows1 + nrows2
            out = pyfits.new_table(t1.columns, nrows=nrows)
            for name in t1.columns.names:
                out.data.field(name)[nrows1:]=t2.field(name)
            hdulist[1] = out
            logger.debug("Writing %s"%outfile)
            hdulist.flush()
            hdulist.close()
Code example #15
def CoaddExposures(exposures, outnumber, datapath='/nfs/lsst2/photocalData/data/observer2/', verbose=False, normalise=False):
    import pyfits as pf
    import shutil
    import sys
    
    N_HDUS = 70
    print 'Coadding %s'%(exposures); sys.stdout.flush()
    n_exp = float(len(exposures))

    filenames = [datapath + 'DECam_00' + str(_) + '.fits.fz' for _ in exposures]
    outfilename = datapath + 'DECam_0' + str(9000000 + outnumber) + '.fits.fz' 
    
    shutil.copyfile(filenames[0],outfilename,)
    out_file = pf.open(outfilename, mode='update')
    
    primaryHeader = out_file[0].header
    total_EXPTIME  = primaryHeader['EXPTIME']
    total_EXPREQ   = primaryHeader['EXPREQ']
    total_DARKTIME = primaryHeader['DARKTIME']

    # convert all arrays to floats for summing and dividing purposes
    if verbose: print 'loading first file & converting dtype'
    for hdu in range(1, N_HDUS+1):
        out_file[hdu].data = out_file[hdu].data.astype(np.float32)
    
    # add other files to the original, collecting relevant metadata
    for i, filename in enumerate(filenames[1:]):
        this_file = pf.open(filename)
        total_EXPTIME  += this_file[0].header['EXPTIME']
        total_EXPREQ   += this_file[0].header['EXPREQ']
        total_DARKTIME += this_file[0].header['DARKTIME']

        for hdu in range(1, N_HDUS+1):
            if verbose: print 'adding hdu %s for file %s of %s'%(hdu,i+2,n_exp)
            out_file[hdu].data += this_file[hdu].data
    
    # Normalise
    if normalise:
        for hdu in range(1, N_HDUS+1):
            if verbose: print 'Normalising hdu %s'%hdu
            out_file[hdu].data /= n_exp

    # Update headers
    primaryHeader['nCOADDED'] = n_exp
    primaryHeader['filename'] = 'DECam_0' + str(9000000 + outnumber) + '.fits'
    primaryHeader['expnum']   = 9000000 + outnumber
    primaryHeader['COADD_OF'] = str(['DECam_00' + str(_) for _ in exposures]).translate(None, ''.join(['[',']',' ','\'']))
    primaryHeader['COADNUMS'] = (str(exposures).translate(None, ''.join(['[',']',' '])))
    if not normalise: n_exp = 1.
    primaryHeader['NORMED']   = str(normalise)
    primaryHeader['EXP_TOT']  = total_EXPTIME # always equal to the total exposure time
    primaryHeader['DARK_TOT'] = total_DARKTIME # always equal to the total darktime
    primaryHeader['EXP_T_EQ'] = total_EXPTIME / n_exp # equivalent exposure time, depending on normalisation
    primaryHeader['EXPREQ']   = total_EXPREQ / n_exp # equivalent EXPREQ, depending on normalisation
    primaryHeader['DARKTIME'] = total_DARKTIME / n_exp # equivalent DARKTIME, depending on normalisation

    if verbose: print 'Headers updated, writing to disk...'; sys.stdout.flush()
    out_file.flush()
    out_file.close()
    if verbose: print 'Finished coaddition of %s, written to %s'%(exposures, outfilename)
Code example #16
    def test_compressed_image_data_float32(self):
        n = np.arange(100, dtype='float32')
        hdu = fits.ImageHDU(n)
        comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
        comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
        hdu.writeto(self.temp('uncomp.fits'), checksum=True)
        with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
            assert np.all(hdul[1].data == comp_hdu.data)
            assert np.all(hdul[1].data == hdu.data)
            assert 'CHECKSUM' in hdul[0].header
            assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
            assert 'DATASUM' in hdul[0].header
            assert hdul[0].header['DATASUM'] == '0'

            assert 'CHECKSUM' in hdul[1].header
            assert 'DATASUM' in hdul[1].header

            if not sys.platform.startswith('win32'):
                assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH'
                assert hdul[1]._header['DATASUM'] == '1277667818'

            with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
                header_comp = hdul[1]._header
                header_uncomp = hdul2[1].header
                assert 'ZHECKSUM' in header_comp
                assert 'CHECKSUM' in header_uncomp
                assert header_uncomp['CHECKSUM'] == 'Cgr5FZo2Cdo2CZo2'
                assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
                assert 'ZDATASUM' in header_comp
                assert 'DATASUM' in header_uncomp
                assert header_uncomp['DATASUM'] == '2393636889'
                assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
Code example #17
File: iofits4.py  Project: bnikolic/oof
def Select(dirin, filein, selfn, dirout,
           overwrite=False):
    """Select a subsample from a table.

    If overwrite is False, will skip existing files in the destination
    directory.
    """

    mask=[ selfn(row) for row in pyfits.open(filein)[1].data ]
    mind=[ x for x,f in  enumerate (mask) if f]
    nrows=len(mind)

    print "Selected %i rows." % nrows
    
    fitsel=re.compile(".*fits?")
    flist = [ fnamein for fnamein in os.listdir(dirin) if fitsel.match(fnamein) ]
    
    for fnamein in flist:
        foutname=os.path.join(dirout,fnamein)

        if os.access(foutname, os.F_OK) and (not overwrite):
            print "Skipping %s as it already exists" % fnamein
        else:
            fin=pyfits.open(os.path.join(dirin,fnamein))

            newtab=pyfits.new_table( fin[1].columns , nrows= nrows)
            for cname in fin[1].columns.names:
                newtab.data.field(cname)._copyFrom( fin[1].data.field(cname)[ mind] )

        
            Write([pyfits.PrimaryHDU(), newtab],
                  foutname,
                  overwrite=1)
Code example #18
File: podi_imarith.py  Project: WIYN-ODI/QuickReduce
def imarith(input_1, op, input_2, output, simple):

    stdout_write("\nOpening input files ...")
    # Open both input fits files
    hdu_1 = pyfits.open(input_1)

    numeric_2 = None
    hdu_2 = None
    if (not os.path.isfile(input_2)):
        numeric_2 = float(input_2)
        print(numeric_2)
    else:
        hdu_2 = pyfits.open(input_2)

    stdout_write(" done!\n")

    rebin_fac = int(cmdline_arg_set_or_default("-bin", 1))
    
    output_hdu = hdu_imarith(hdu_1, op, hdu_2,
                         simple=simple,
                         numeric_2=numeric_2,
                         rebin_fac=rebin_fac)

    stdout_write(" writing output ...")
    clobberfile(output)
    output_hdu.writeto(output, output_verify='fix+ignore')
    stdout_write(" done!\n\n")

    return
Code example #19
    def test_compressed_image_data_int16(self):
        n = np.arange(100, dtype='int16')
        hdu = fits.ImageHDU(n)
        comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
        comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
        hdu.writeto(self.temp('uncomp.fits'), checksum=True)
        with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
            assert np.all(hdul[1].data == comp_hdu.data)
            assert np.all(hdul[1].data == hdu.data)
            assert 'CHECKSUM' in hdul[0].header
            assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
            assert 'DATASUM' in hdul[0].header
            assert hdul[0].header['DATASUM'] == '0'

            assert 'CHECKSUM' in hdul[1].header
            assert hdul[1]._header['CHECKSUM'] == 'J5cCJ5c9J5cAJ5c9'
            assert 'DATASUM' in hdul[1].header
            assert hdul[1]._header['DATASUM'] == '2453673070'
            assert 'CHECKSUM' in hdul[1].header

            with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
                header_comp = hdul[1]._header
                header_uncomp = hdul2[1].header
                assert 'ZHECKSUM' in header_comp
                assert 'CHECKSUM' in header_uncomp
                assert header_uncomp['CHECKSUM'] == 'ZE94eE91ZE91bE91'
                assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
                assert 'ZDATASUM' in header_comp
                assert 'DATASUM' in header_uncomp
                assert header_uncomp['DATASUM'] == '160565700'
                assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
Code example #20
    def test_compressed_image_data_float32(self):
        n = np.arange(100, dtype='float32')
        hdu = fits.ImageHDU(n)
        comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
        comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
        hdu.writeto(self.temp('uncomp.fits'), checksum=True)
        with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
            assert np.all(hdul[1].data == comp_hdu.data)
            assert np.all(hdul[1].data == hdu.data)
            assert 'CHECKSUM' in hdul[0].header
            assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
            assert 'DATASUM' in hdul[0].header
            assert hdul[0].header['DATASUM'] == '0'

            assert 'CHECKSUM' in hdul[1].header
            assert 'DATASUM' in hdul[1].header
            assert 'CHECKSUM' in hdul[1].header
            if sys.platform != 'win32':
                # The checksum ends up being different on Windows, possibly due
                # to slight floating point differences
                # TODO: In Astropy mark these properly as known fail
                assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH'
                assert hdul[1]._header['DATASUM'] == '1277667818'

            with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
                header_comp = hdul[1]._header
                header_uncomp = hdul2[1].header
                assert 'ZHECKSUM' in header_comp
                assert 'CHECKSUM' in header_uncomp
                assert header_uncomp['CHECKSUM'] == 'Cgr5FZo2Cdo2CZo2'
                assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
                assert 'ZDATASUM' in header_comp
                assert 'DATASUM' in header_uncomp
                assert header_uncomp['DATASUM'] == '2393636889'
                assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
Code example #21
def main(options):
    if options.qFile == '':
        raise Exception('An input Q or U file must be specified.')
    if options.freq == '':
        raise Exception('A file containing frequency list must be specified.')
    if options.polInt == '':
        raise Exception('An input polarized intensity map must be specified.')
    # If all input options are valid:
    print 'INFO: Reading the input files'
    try:
        qData   = pf.open(options.qFile)[0].data
        pData   = pf.open(options.polInt)[0].data
        header  = pf.open(options.polInt)[0].header
        freqFile = open(options.freq)
    except:
        raise Exception('Unable to read the input files.')
    print 'INFO: Images have the following dimensions:'
    print 'Stokes Q:', qData.shape
    print 'Polarized intensity:', pData.shape
    if qData.shape[1] != pData.shape[1] or qData.shape[2] != pData.shape[2]:
        raise Exception('Input fits have different image dimensions.')
    # Estimate the number of frequencies listed in the freq file
    freqArray   = []
    for line in freqFile:
        freqArray.append(float(line))
    print 'INFO: {:} frequencies listed in the input file'.format(len(freqArray))
    if len(freqArray) != qData.shape[0]:
        raise Exception('No. of frequency channels in input files do not match.')
    # Compute variance in \lambda^2: \sigma^2_{\lambda^2}
    print 'INFO: Computing variance in lambda^2'
    freqArray    = np.asarray(freqArray)
    lambdaVals   = lightSpeed / freqArray
    lambda_pow2  = np.square(lambdaVals)
    varLambda2   = np.absolute(np.var(lambda_pow2))

    # Estimate the noise in the Q-cube along each line of sight
    print 'INFO: Estimating noise variance in Q'
    varInQ     = np.zeros(pData.shape)
    for i in range(pData.shape[1]):
        for j in range(pData.shape[2]):
            varInQ[0,i,j] = np.absolute(np.var(qData[:,i,j]))
    hdu = pf.PrimaryHDU(data=varInQ, header=header)
    hdu.writeto('varInQ.fits', clobber=True)
    del hdu
    
    # Compute the variance in RM using equation 2.73 in Brentjens' thesis
    print 'INFO: Computing variance of RM'
    denom = 4*(len(freqArray)-2) * np.square(pData) * varLambda2
    varInRM = np.absolute(np.divide(varInQ, denom))
    hdu = pf.PrimaryHDU(data=varInRM)
    hdu.writeto('varInRM.fits', clobber=True)
    del hdu
    print varInRM.shape
    
    # Compute noise in RM
    print 'INFO: Computing standard deviation in RM'
    noiseInRM = np.sqrt(varInRM)
    hdu = pf.PrimaryHDU(data=noiseInRM, header=header)
    hdu.writeto('noiseInRM.fits', clobber=True)
    del hdu
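The denom line implements the form of equation 2.73 from Brentjens' thesis cited in the comment, so the map written to varInRM.fits is (N channels, polarized intensity P):

\[
\sigma^2_{\mathrm{RM}} = \frac{\sigma^2_Q}{4\,(N-2)\,P^2\,\sigma^2_{\lambda^2}}
\]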
Code example #22
	def get_ABmag_HST(self,band='f105w'):
		''' 
		-----PURPOSE-----
		Calculate the AB magnitude from the HST photometry in the nearest band to MOSFIRE Y-band
		-----INPUT-------
		band 		The photometric band in which you want magnitudes, default is 'f105w'
		'''
		ra, dec = self.radec
		# mags = p.open(fitscat)[1].data['%s_mag_iso' % band]
		mags = p.open(self.photfits)[1].data['%s_mag_AUTO' % band]
		phot_ras = p.open(self.photfits)[1].data['ALPHA_J2000']
		phot_decs = p.open(self.photfits)[1].data['DELTA_J2000']
		thresh=0.35
		mindist=thresh
		for jj in range(len(mags)):
			phot_ra, phot_dec = phot_ras[jj], phot_decs[jj]
			mag = mags[jj]
			# phot_F125W_magerr = phot_F125W_magerrs[jj]
			dist = np.sqrt(((ra-phot_ra)*3600*np.cos(np.pi/180*phot_dec))**2 + ((dec-phot_dec)*3600)**2) # in arcseconds
			if dist < mindist:
				keep_mag = mag
				keep_ra = phot_ra
				keep_dec = phot_dec
				# keep_id = phot_id
				mindist=dist
		if mindist >= thresh: # no match
			sys.exit("NO MATCH")
		return keep_mag	
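The distance computed in the loop is the small-angle separation in arcseconds, matched against the 0.35 arcsec threshold:

\[
d = 3600\sqrt{\big((\alpha-\alpha_{\mathrm{phot}})\cos\delta_{\mathrm{phot}}\big)^2
+(\delta-\delta_{\mathrm{phot}})^2}
\]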
Code example #23
File: big_table.py  Project: eigenbrot/snakes
def compute_DC(pointing, folder, uw_chi):

    CI_file = glob('{}/*P{}*CI*.dat'.format(folder,pointing))[0]
    
    bestZ = np.loadtxt(CI_file, usecols=(5,), unpack=True, dtype=np.int)
    
    fzlist = ['0.005Z','0.02Z','0.2Z','0.4Z','1Z','2.5Z','allZ']

    hdu = pyfits.open('NGC_891_P{}_bin30.mso.fits'.format(pointing))[0]
    head = hdu.header
    data = hdu.data
    error = pyfits.open('NGC_891_P{}_bin30.meo.fits'.format(pointing))[0].data

    wave = (np.arange(data.shape[1]) - head['CRPIX1'] - 1)*head['CDELT1'] + head['CRVAL1']
    idx = np.where((wave >= 3800.) & (wave <= 6800.))[0]
    wave = wave[idx]
    data = data[:,idx]
    error = error[:,idx]    

    outarr = np.zeros(data.shape[0])

    for i, bz in enumerate(bestZ):
        best_file = '{}/{}/NGC_891_P{}_bin30_allz2.fit.fits'.\
                    format(folder,fzlist[bz],pointing)
        print i+1, fzlist[bz]
        models = pyfits.open(best_file)[0].data

        coef_file = '{}/{}/NGC_891_P{}_bin30_allz2.coef.fits'.\
                    format(folder,fzlist[bz],pointing)
        coefs = pyfits.open(coef_file)[1].data
        
        chisq = np.sum((data[i,:] - models[i,:])**2/error[i,:]**2)/coefs['TOTFREE'][i]
        outarr[i] = uw_chi[i] - chisq

    return outarr
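Each element of outarr is uw_chi minus a reduced chi-squared over the fitted wavelength window, with TOTFREE as the degrees of freedom:

\[
\chi^2_i = \frac{1}{N_{\mathrm{free},i}}\sum_\lambda
\frac{(d_{i\lambda}-m_{i\lambda})^2}{\sigma_{i\lambda}^2}
\]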
Code example #24
File: makeALMAplot.py  Project: coolastro/SiOJets_New
def fig11():
    hdulist1 = pf.open('/import/phy-pc1064_a/Documents/SiOJets_New/run4ALMA/imageShock_J2-1_45deg_molcool_dirty.fits.gz')
    hdulist2 = pf.open('/import/phy-pc1064_a/Documents/SiOJets_New/run4ALMA/imageShock_J5-4_45deg_molcool_dirty.fits.gz')
    hdulist3 = pf.open('/import/phy-pc1064_a/Documents/SiOJets_New/run4ALMA/imageShock_J8-7_45deg_molcool_dirty.fits.gz')
    
    
    A = hdulist1[0].header

    RAarr = [A['CRVAL1'] - i*A['CDELT1'] for i in range(A['NAXIS1']/2)] + [A['CRVAL1'] + i*A['CDELT1'] for i in range(A['NAXIS1']/2,A['NAXIS1'])]
    DECarr = [A['CRVAL2'] - i*A['CDELT2'] for i in range(A['NAXIS2']/2)] + [A['CRVAL2'] + i*A['CDELT2'] for i in range(A['NAXIS2']/2,A['NAXIS2'])]
    RAarr2 = RA2ICRS(RAarr)
    DECarr2 = DEC2ICRS(DECarr)
    VELarr2 = Velarr(pi/4.,100.0, A)
    print VELarr2[0]

    clevs1 = [0.0005, 0.001, 0.005, 0.025, 0.125]
    clevs2 = [0.01,0.05, 0.1, 0.2, 0.4,0.8,1.2] 
    # Get ALMA images
    f1 = plt.figure(figsize=[11,8])
    plt.subplots_adjust(wspace=0.05)
    ax1 = f1.add_subplot(121)
    
    im1 = imshow(hdulist1[0].data[:,:,:].sum(0),origin='image',vmin = 0.001, vmax = 0.25,cmap=cm.gist_heat)
    plt.colorbar(im1,ticks=clevs2)
    im2 = contour(hdulist3[0].data[:,:,:].sum(0),levels=clevs2,colors='b',linewidths=.70)
    im3 = contour(hdulist2[0].data[:,:,:].sum(0),levels=clevs2,colors='g',linewidths=.70)
    
    axis([100.0,250.0,0.0,320.0])
    ax1.xaxis.set_major_locator(MaxNLocator(4))
    ax1.yaxis.set_major_locator(MaxNLocator(4))
    locs,labels = plt.xticks()
    plt.xticks(locs,[RAarr2[int(i)] for i in locs],rotation=15)
    locs,labels = plt.yticks()
    plt.yticks(locs[1:-1],[DECarr2[int(i)] for i in locs[1:-1]],rotation=90)
    ax1.set_ylabel('Declination [J2000]')
    ax1.set_xlabel('Right Ascension [J2000]')
    plt.figtext(0.16,0.2,r'Image [Jy/beam] : 2-$>$1',color='r')
    plt.figtext(0.16,0.175,r'Green Contour : 5-$>$4',color='g')
    plt.figtext(0.16,0.15,r'Blue Contour : 8-$>$7',color='b')
    
    ax2 = f1.add_subplot(122)
    
    im1b = imshow(hdulist1[0].data[:,:,175].T,origin='image', vmax = 0.025,cmap=cm.gist_heat)
    plt.colorbar(im1b,ticks=clevs1)
    im2b = contour(hdulist3[0].data[:,:,175].T,levels=clevs1,colors='b',linewidths=.70)
    im3b = contour(hdulist2[0].data[:,:,175].T,levels=clevs1,colors='g',linewidths=.70)
    
    axis([0.0,150.0,0.0,320.0])
    ax2.xaxis.set_major_locator(MaxNLocator(4))
    #ax2.yaxis.set_major_locator(MaxNLocator(4))
    locs,labels = plt.xticks()
    plt.xticks(locs,[str('%.1f'%VELarr2[int(i)]) for i in locs])
    plt.setp(ax2,yticks=[])
    ax2.set_xlabel(r'Velocity [km s$^{-1}$]')
    
    plt.figtext(0.53,0.2,r'PV Diagram [Jy/beam] : 2-$>$1',color='r')
    plt.figtext(0.53,0.175,r'Green Contour : 5-$>$4',color='g')
    plt.figtext(0.53,0.15,r'Blue Contour : 8-$>$7',color='b')
    
    plt.show()
Code example #25
File: fixTHINGS.py  Project: astroPDR/PMAP
def FixTHINGS(imageIn, imageOut):
    
    print
    sys.stdout.write('Fixing file %s ... ' % imageIn)
    sys.stdout.flush()
    
    if imageOut != imageIn:
        hdu = pf.open(imageIn)
    else:
        hdu = pf.open(imageIn, mode='update')
        
    dataNew = hdu[0].data[0,0,:,:]
    
    del hdu[0].header['CTYPE3']; del hdu[0].header['CDELT3'];  del hdu[0].header['CRVAL3']
    del hdu[0].header['CRPIX3']; del hdu[0].header['CROTA3']
    del hdu[0].header['CTYPE4']; del hdu[0].header['CDELT4']; del hdu[0].header['CRVAL4']
    del hdu[0].header['CRPIX4']; del hdu[0].header['CROTA4']

    if imageOut != imageIn:
        if os.path.exists(imageOut): os.remove(imageOut)
        pf.writeto(imageOut, dataNew, hdu[0].header)
    else:
        hdu[0].data = dataNew
        hdu.flush()
    
    print 'Done'
    
    print
    return
Code example #26
File: test_core.py  Project: coleb/PyFITS
    def test_uint(self):
        hdulist_f = fits.open(self.data('o4sp040b0_raw.fits'))
        hdulist_i = fits.open(self.data('o4sp040b0_raw.fits'), uint=True)

        assert hdulist_f[1].data.dtype == np.float32
        assert hdulist_i[1].data.dtype == np.uint16
        assert np.all(hdulist_f[1].data == hdulist_i[1].data)
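Background for the pair of dtype assertions: FITS has no native unsigned 16-bit integer type, so unsigned data is conventionally stored as int16 with BZERO = 32768. By default pyfits applies that offset as a floating-point scaling (hence float32), while uint=True reinterprets the scaled values as uint16; the final assert checks that the two readings agree element-wise.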
Code example #27
File: test_core.py  Project: coleb/PyFITS
    def test_hdu_fromstring(self):
        """
        Tests creating a fully-formed HDU object from a string containing the
        bytes of the HDU.
        """

        dat = open(self.data('test0.fits'), 'rb').read()

        offset = 0
        with fits.open(self.data('test0.fits')) as hdul:
            hdulen = hdul[0]._data_offset + hdul[0]._data_size
            hdu = fits.PrimaryHDU.fromstring(dat[:hdulen])
            assert isinstance(hdu, fits.PrimaryHDU)
            assert hdul[0].header == hdu.header
            assert hdu.data is None

        hdu.header['TEST'] = 'TEST'
        hdu.writeto(self.temp('test.fits'))
        with fits.open(self.temp('test.fits')) as hdul:
            assert isinstance(hdu, fits.PrimaryHDU)
            assert hdul[0].header[:-1] == hdu.header[:-1]
            assert hdul[0].header['TEST'] == 'TEST'
            assert hdu.data is None

        with fits.open(self.data('test0.fits'))as hdul:
            for ext_hdu in hdul[1:]:
                offset += hdulen
                hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size
                hdu = fits.ImageHDU.fromstring(dat[offset:offset + hdulen])
                assert isinstance(hdu, fits.ImageHDU)
                assert ext_hdu.header == hdu.header
                assert (ext_hdu.data == hdu.data).all()
Code example #28
File: skyvar.py  Project: luminosa42/astr596
def skyvar(wave, sky = 'kecksky.fits'):

    # Branch on the requested sky file; the original tested "if False:",
    # which made the Keck branch unreachable
    if sky == 'kecksky.fits':
        skyfits = pyfits.open("kecksky.fits")
        crval = skyfits[0].header['CRVAL1']
        delta = skyfits[0].header['CDELT1']
        sky_flux = skyfits[0].data[0]
        print "Keck sky used"
    else:
        skyfits = pyfits.open("licksky.fits")
        crval = skyfits[0].header['CRVAL1']
        delta = skyfits[0].header['CDELT1']
        sky_flux = skyfits[0].data
        print "Lick sky used"

    start = crval - math.ceil(0.5*len(sky_flux)*delta)
    stop = crval + math.ceil(0.5*len(sky_flux)*delta)

    sky_wave = [(start+delta*i) for i in range(len(sky_flux))]

    plt.plot(sky_wave, sky_flux)
    plt.show()

    # The original returned an undefined name (new_sky); return the
    # reconstructed sky spectrum instead
    return sky_wave, sky_flux
Code example #29
File: MOPplant.py  Project: OSSOS/MOP
def plant(image,psf,outfile,list,dtime):
    import pyfits,os 
    import numarray as N
    psf_f=pyfits.open(psf)
    psf_flux=psf_f[0].data.sum()
    psf_x_size=psf_f[0].header.get('NAXIS1',0)
    psf_y_size=psf_f[0].header.get('NAXIS2',0)
    psf_x=psf_f[0].header.get('PSF_X',0)
    psf_y=psf_f[0].header.get('PSF_Y',0)
    psf_mag=psf_f[0].header.get('PSFMAG',26.0)
    image_f=pyfits.open(image)
    xmax=image_f[0].header.get('NAXIS1',0)
    ymax=image_f[0].header.get('NAXIS2',0)
    exptime=image_f[0].header.get('EXPTIME',1)
    zeropoint=image_f[0].header.get('PHOT_C',26.5)

    import mop_files
    ahdu=mop_files.read(list)
    import string,math,re
    from numarray.nd_image.interpolation import shift as shift

    from string import atof
    for i in range(len(ahdu['data']['x'])):
        x=float(ahdu['data']['x'][i])
        y=float(ahdu['data']['y'][i])
        mag=float(ahdu['data']['mag'][i])
        rate=float(ahdu['data']['pix_rate'][i])/3600.0
        angle=float(ahdu['data']['angle'][i])
        x_shift_rate=rate*math.cos(angle/57.3)
        y_shift_rate=rate*math.sin(angle/57.3)
        #flux=exptime*10**((zeropoint-mag)/2.5)
        #scale=flux/psf_flux
        scale=10**((psf_mag-mag)/2.5)*exptime
        #print scale
        niter=int(rate*exptime)+1
        scale=scale/niter
        dt = exptime/niter
        #print x,y,mag,niter
        # separate index for the sub-exposure loop; the original reused i,
        # shadowing the source-list index of the outer loop
        for k in range(niter):
            curtime = dtime+dt*k
            x=x+x_shift_rate*curtime
            y=y+y_shift_rate*curtime
            x1=int(max(0,x-psf_x))
            x2=int(min(xmax,x+psf_x_size-psf_x))
            y1=int(max(0,y-psf_y))
            y2=int(min(ymax,y+psf_y_size-psf_y))
            #print x2,x1,y2,y1
            px1=int((psf_x-(x-x1)))
            px2=int(px1+(x2-x1))
            py1=int(psf_y-(y-y1))
            py2=int(py1+(y2-y1))
            sec = psf_f[0].data[py1:py2,px1:px2].copy()
            sec = shift(sec,(y-int(y),x-int(x)),order=3)
            #print sec.shape,y2-y1,x2-x1
            #print "Adding @ ",x,y,mag,scale," data=> ",y1,y2,x1,x2," PSF=> ",py1,py2,px1,px2
            
            image_f[0].data[y1:y2,x1:x2]+=scale*sec

    image_f.writeto(outfile)
    image_f.close()
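Each planted source is scaled from the PSF magnitude zero point and split across niter sub-steps so it trails at the requested rate:

\[
\mathrm{scale}=\frac{t_{\mathrm{exp}}}{n_{\mathrm{iter}}}\,10^{(m_{\mathrm{PSF}}-m)/2.5},
\qquad n_{\mathrm{iter}}=\lfloor r\,t_{\mathrm{exp}}\rfloor+1
\]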
Code example #30
File: load_templates.py  Project: kadubarbosa/hydra1
def emission_templates(velscale):
    """ Load files with stellar library used as templates. """
    current_dir = os.getcwd()
    # Template directory is also set in setyp.py
    os.chdir(template_dir)
    emission = [x for x in os.listdir(".") if x.startswith("emission") and x.endswith(".fits")]
    emission.sort()
    c = 299792.458
    FWHM_tem = 2.1  # MILES library spectra have a resolution FWHM of 2.54A.
    # Extract the wavelength range and logarithmically rebin one spectrum
    # to the same velocity scale of the SAURON galaxy spectrum, to determine
    # the size needed for the array which will contain the template spectra.
    #
    hdu = pf.open(emission[0])
    ssp = hdu[0].data
    h2 = hdu[0].header
    lamRange2 = h2["CRVAL1"] + np.array([0.0, h2["CDELT1"] * (h2["NAXIS1"] - 1)])
    sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp, velscale=velscale)
    templates = np.empty((sspNew.size, len(emission)))
    for j in range(len(emission)):
        hdu = pf.open(emission[j])
        ssp = hdu[0].data
        sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp, velscale=velscale)
        templates[:, j] = sspNew
    # templates *= 1e5 # Normalize templates
    os.chdir(current_dir)
    return templates, logLam2, h2["CDELT1"], emission
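The linear wavelength range handed to util.log_rebin comes straight from the first template's header:

\[
[\lambda_{\min},\,\lambda_{\max}] = \mathrm{CRVAL1} + \big[\,0,\ \mathrm{CDELT1}\,(\mathrm{NAXIS1}-1)\,\big]
\]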
Code example #31
File: cc_centroid.py  Project: t-brandt/acorns-adi
def cc_centroid(refimage, image=None, frame=None, usemask=True, side=None):
    """
    function cc_centroid(refimage, image=None, frame=None)
    
    refimage should be a 2D or 3D numpy.ndarray.  If a 3D array,
    the first index runs over the template images.

    Must supply either image, a 2D numpy.ndarray to be centroided,
    or frame, the filename from which that image may be loaded.

    The function returns the centroid [yc, xc] if successful, None
    otherwise.

    Description:
    cc_centroid finds the centroid of the input image using the
    following algorithm:
    1. Flag saturated pixels, centroid the greatest concentration of
    such pixels to compute a provisional center.
    2. Mask pixels near the provisional center, compute a variance for
    all other pixels.  Variance = shot noise + read noise.
    3. Fit the PSF templates using \chi^2 at a grid of offsets.
    4. Centroid the map of \chi^2 merit statistics.

    """

    np.seterr(all='ignore')

    ####################################################################
    # Locate data if no frame supplied, load data
    ####################################################################

    if image is None and frame is None:
        print "Error: must supply either data or a filename to crosscorr_centroid."
        sys.exit(1)
    elif image is None:  # "== None" would compare elementwise on an array
        if not "_dw.fits" in frame:
            frame_dw = re.sub(".fits", "_dw.fits", frame)
        else:
            frame_dw = frame
        try:
            image = pyf.open(frame_dw)[-1].data
        except:
            frame_ds = re.sub(".fits", "_ds.fits", frame)
            try:
                image = pyf.open(frame_ds)[-1].data
            except:
                print "Error, cannot read data from " + frame_ds
                sys.exit(1)

    ####################################################################
    # Add the capability to only search the left or right half of the
    # image
    ####################################################################

    image_save = image.copy()
    dimy, dimx = image.shape
    if side is not None:
        if re.search('[Ll]eft', side):
            image[:, dimx // 2:] = 0
        elif re.search('[Rr]ight', side):
            image[:, :dimx // 2] = 0

    ####################################################################
    # Find approximate centroid by flagging (near-)saturated pixels
    # and locating the greatest concentration of them
    ####################################################################

    sat = min(image.max() * 0.7, 1e5)
    x = np.arange(image.shape[1])
    y = np.arange(image.shape[0])
    x, y = np.meshgrid(x, y)
    satpts = image > 0.8 * sat
    image = image_save

    maxpts = 0
    imax, jmax = [0, 0]
    for i in range(100, image.shape[0] - 100, 100):
        for j in range(100, image.shape[1] - 100, 100):
            npts = np.sum(satpts[i - 100:i + 100, j - 100:j + 100])
            if npts > maxpts:
                maxpts = npts
                imax, jmax = [i, j]

    ####################################################################
    # Check to see that this guess is in the central half of the FOV.
    # Then refine the estimate by calculating the mean position of the
    # (near-)saturated pixels in the neighborhood of the guess.
    # Do this iteratively, with the final estimate computed from a
    # 100x100 pixel region.
    ####################################################################

    di, dj = [image.shape[0] // 2, image.shape[1] // 2]
    if side is None and (np.abs(imax - di) > di / 2
                         or np.abs(jmax - dj) > dj / 2):
        return None  # failure

    for di in range(100, 70, -10):
        npts = 1. * np.sum(satpts[imax - di:imax + di, jmax - di:jmax + di])
        yc = np.sum(satpts[imax - di:imax + di, jmax - di:jmax + di] *
                    y[imax - di:imax + di, jmax - di:jmax + di]) / npts
        xc = np.sum(satpts[imax - di:imax + di, jmax - di:jmax + di] *
                    x[imax - di:imax + di, jmax - di:jmax + di]) / npts
        try:
            imax, jmax = [int(yc), int(xc)]
        except:
            return None  # failure

    ####################################################################
    # Calculate the typical saturation radius; cap at 700 mas
    ####################################################################

    dr_rms = np.sum(satpts[imax - di:imax + di, jmax - di:jmax + di] *
                    (y[imax - di:imax + di, jmax - di:jmax + di] - yc)**2)
    dr_rms += np.sum(satpts[imax - di:imax + di, jmax - di:jmax + di] *
                     (x[imax - di:imax + di, jmax - di:jmax + di] - xc)**2)
    dr_rms = np.sqrt(dr_rms / npts)
    dr_rms = min(dr_rms, 70)

    center = [imax, jmax]

    ####################################################################
    # Verify shape of reference PSF
    ####################################################################

    if len(refimage.shape) == 2:
        dimy, dimx = refimage.shape
        nref = 1
    elif len(refimage.shape) == 3:
        nref, dimy, dimx = refimage.shape
    else:
        print "Reference image must be a single 2D image or an array of 2D images."
        sys.exit(1)
    if dimy % 2 == 0 or dimx % 2 == 0 or dimy != dimx:
        print "Reference image to crosscorr_centroid must be square and\nhave an odd dimension."
        sys.exit(1)

    ####################################################################
    # Mask questionable data in the image, reshape arrays
    ####################################################################

    di = dimy // 2
    r_im = np.sqrt((x[imax - di:imax + di, jmax - di:jmax + di] - jmax)**2 +
                   (y[imax - di:imax + di, jmax - di:jmax + di] - imax)**2)
    mask = np.all([image < 0.5 * sat, image > 0], axis=0)

    baddata = np.all([
        image[imax - di:imax + di, jmax - di:jmax + di] < 0.2 * sat,
        r_im < 2 * dr_rms
    ],
                     axis=0)

    if usemask:
        np.putmask(mask[imax - di:imax + di, jmax - di:jmax + di],
                   r_im < 1.5 * dr_rms, 0)
        np.putmask(mask[imax - di:imax + di, jmax - di:jmax + di], baddata, 0)
    refimage2 = np.reshape(refimage, (nref, -1))

    sub_istd = np.ndarray(refimage2.shape)
    if usemask:
        istd = np.sqrt(mask / (np.abs(image) + 200))
    else:
        istd = np.sqrt(1 / (np.abs(image) + 200))

    ####################################################################
    # Produce an nxn map of chi2 as a function of offset.
    # Use SVD to do the fitting at each offset.
    ####################################################################

    chi2_best = np.inf
    n = 21
    x = np.arange(n) - n // 2
    x, y = np.meshgrid(x, x)

    chi2 = np.zeros((n, n))
    ybest, xbest = [0, 0]
    for i in range(n):
        for j in range(n):
            y1 = center[0] + y[i, j] - dimy // 2
            x1 = center[1] + x[i, j] - dimx // 2

            subarr = np.reshape(image[y1:y1 + dimy, x1:x1 + dimx], -1)
            for k in range(nref):
                sub_istd[k] = np.reshape(istd[y1:y1 + dimy, x1:x1 + dimx], -1)
            A = sub_istd * refimage2
            b = sub_istd[0] * subarr
            coef = linalg.lstsq(A.T, b)[0]

            # Compute residuals, sum to get chi2
            resid = subarr - coef[0] * refimage2[0]
            for k in range(1, nref):
                resid -= coef[k] * refimage2[k]
            chi2[i, j] = np.sum((resid * sub_istd[0])**2)

            if chi2[i, j] < chi2_best:
                chi2_best = chi2[i, j]
                ibest, jbest = [i, j]

    ####################################################################
    # Take a 5x5 map around the best chi2, centroid this.
    # If that 5x5 map would be off the grid, return the initial guess.
    # If the centroiding fails (result falls outside the 5x5 grid),
    # return the [y, x] with the best chi2.
    ####################################################################

    ybest0, xbest0 = [y[ibest, jbest], x[ibest, jbest]]
    p0 = [chi2_best, 2., 2., ybest0, xbest0]
    if ibest < 2 or ibest >= n - 3 or jbest < 2 or jbest >= n - 3:
        return None  #failure

    x = np.reshape(x[ibest - 2:ibest + 3, jbest - 2:jbest + 3], -1)
    y = np.reshape(y[ibest - 2:ibest + 3, jbest - 2:jbest + 3], -1)
    chi2 = np.reshape(chi2[ibest - 2:ibest + 3, jbest - 2:jbest + 3], -1)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        p1, success = optimize.leastsq(errorfunc, p0[:], args=(y, x, chi2))
    ybest, xbest = [p1[3], p1[4]]

    if ybest > y.min() and ybest < y.max() and xbest > x.min(
    ) and xbest < x.max():
        return [center[0] + ybest, center[1] + xbest, dr_rms]
    else:
        return None  # failure
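At each trial offset the template amplitudes c_k are found by weighted linear least squares (the linalg.lstsq call), and the merit statistic mapped over the offset grid is:

\[
\chi^2(\Delta y,\Delta x)=\sum_{\mathrm{pixels}}
\frac{\big(d-\sum_k c_k T_k\big)^2}{\lvert d\rvert+200}
\]

with masked pixels given zero weight when usemask is set; the constant 200 plays the role of the read-noise term in the docstring's shot-noise-plus-read-noise variance model.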
Code example #32
        continue
    if name in ['multi_chan_beam']:
        multi_chan_beam = value
        continue
    if name in write_catalog:
        write_opts[name] = value
    elif name in ['freq0', 'frequency']:
        freq0 = value
    else:
        img_opts[name] = value
        if name == 'spectralindex_do':
            spi_do = value

img_opts.pop('freq0', None)
if freq0 is None:
    with pyfits.open(img_opts['filename']) as hdu:
        hdr = hdu[0].header
        for i in xrange(1, hdr['NAXIS'] + 1):
            if hdr['CTYPE{0:d}'.format(i)].startswith('FREQ'):
                freq0 = hdr['CRVAL{0:d}'.format(i)]

if spi_do and multi_chan_beam:
    with pyfits.open(img_opts['filename']) as hdu:
        hdr = hdu[0].header
    beams = []
    # Get a sequence of BMAJ with digit suffix from the image header keys
    bmaj_ind = filter(lambda a: a.startswith('BMAJ') and a[-1].isdigit(),
                      hdr.keys())
    for bmaj in bmaj_ind:
        ind = bmaj.split('BMAJ')[-1]
        beam = [
Code example #33
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

parser = argparse.ArgumentParser(description='Plot FITS spectrum to file')
parser.add_argument('infile', type=str, help='FITS filename')
parser.add_argument('outfile', type=str, help='output image filename')
parser.add_argument('--width', '-W', type=int, default=640, help='image width')
parser.add_argument('--height',
                    '-H',
                    type=int,
                    default=480,
                    help='image height')
parser.add_argument('--compact', '-c', action='store_true')

args = parser.parse_args()

hdulist = pyfits.open(args.infile)
spectrum = BessSpectra(hdulist)

outfile = open(args.outfile, 'w')

style = None
dpi = 72.

x, y = (args.width, args.height)
x = x / dpi
y = y / dpi

fig = Figure(figsize=(x, y), dpi=dpi, facecolor='white')
ax = fig.add_subplot(111)

if args.compact:
Code example #34
def main(args):

    if args.plotimg:
        import pylab as plt
    # Generate lists of input images and check that they exist
    images = []
    facets = []
    psf_fwhm = []  # resolution
    frequency = []  # frequency of images (should be equal?)

    basestring = args.basestring
    imlist = glob.glob(basestring + '*.image')

    images = [i for i in imlist if not ('nm' in i)]

    #construct image, facet number list
    images = []
    fields = []
    fnumbers = []
    p = re.compile('imfield(\d)_cluster(.*)\.')
    for i in imlist:
        if 'nm' in i:
            continue
        m = p.match(i)
        if m is None:
            print 'failed to match', i
        assert (m is not None)
        images.append(i)
        fields.append(m.group(1))
        fnumbers.append(m.group(2))
    fnumberset = set(fnumbers)
    for f in fnumberset:
        fieldlist = []
        for i, (field, facet) in enumerate(zip(fields, fnumbers)):
            if f == facet:
                fieldlist.append((field, i))
        while len(fieldlist) > 1:
            # more than one field for the same facet...
            delfield, i = min(fieldlist)
            print 'de-duplicating', images[i]
            del (images[i])
            del (fields[i])
            del (fnumbers[i])
            del (fieldlist[fieldlist.index((delfield, i))])
    # now we have a non-redundant list
    for i in range(len(images)):
        print i, images[i], fields[i], fnumbers[i]

    # get the facet mask
    for fn in fnumbers:
        facets.append('templatemask_' + fn + '.masktmp')
        if not os.path.exists(facets[-1]):
            print "Error: facet image", facets[-1], "does not exist"
            return 1

    formstr = '{0:45s}  {1:45s} {2:s}  {3:s} {4:s} {5:s}'
    print formstr.format("-----", "--------", "------------", "-------",
                         "-------", "------")
    print formstr.format("Image", "FC image", "Norm. weight", "Maj(ac)",
                         "Min(ac)", "PA(deg)")
    print formstr.format("-----", "--------", "------------", "-------",
                         "-------", "------")

    for i in range(len(images)):
        this_pim = pim.image(images[i])
        info_dict = this_pim.info()['imageinfo']['restoringbeam']
        # get beam info
        bpar_ma = quanta.quantity(info_dict['major']).get_value('deg')
        bpar_mi = quanta.quantity(info_dict['minor']).get_value('deg')
        bpar_pa = quanta.quantity(info_dict['positionangle']).get_value('deg')
        psf_fwhm.append([bpar_ma, bpar_mi, bpar_pa])
        frequency.append(
            this_pim.info()['coordinates']['spectral2']['restfreq'])
        print '{0:45.45s}  {1:45.45s} {2:0.2f}          {3:0.2f}    {4:0.2f}    {5:0.2f}'.format(
            images[i], facets[i], 0, bpar_ma * 60, bpar_mi * 60, bpar_pa)

    psf_fwhm = np.array(psf_fwhm)
    frequency = np.array(frequency)
    mean_psf_fwhm = np.mean(psf_fwhm, axis=0)
    mean_frequency = np.mean(frequency)
    print '\nmean Beam: {0:0.3f} maj (arcmin), {1:2.3f} min (arcmin), {2:0.2f} pa (deg)'.format(
        mean_psf_fwhm[0] * 60, mean_psf_fwhm[1] * 60, mean_psf_fwhm[2])
    print 'Frequency (MHz):', mean_frequency * 1e-6

    if np.max(mean_frequency - frequency) / mean_frequency > 1e-6:
        print '\n\nWARNING.\nAre you using images from different bands?'
        print 'Frequencies (Hz):', frequency
        time.sleep(2)  # give user time to see this ...

    # Initialize some vectors
    declims = []  # store the limits of the declination axes
    #ralims = [] # store the limits of the r.a. axes
    raleft = []
    raright = []
    rainc = []  # store the r.a. increments in case they differ
    decinc = []  # store the dec increments in case they differ
    pims = []  # stores the pyrap images of the data
    pfcs = []  # stores the pyrap images of the facet images

    # Get image frames for input images
    for im, fa in zip(images, facets):
        image = pim.image(im)
        sptcoords = image.coordinates().get_coordinate('spectral')
        nc = sptcoords.get_axis_size()
        #        assert(sptcoords.get_image_axis() == 0)

        # Get Stokes axis. Ensure we are working with the Stokes parameter requested.
        stkcoords = image.coordinates().get_coordinate('stokes')
        #        assert(stkcoords.get_image_axis() == 1)
        if stkcoords.get_axis_size() == 1:
            assert (stkcoords.get_stokes()[0] == args.stokes)
        else:
            stks = stkcoords.get_stokes().index(args.stokes)
            image = image.subimage(blc=(0, stks),
                                   trc=(nc - 1, stks),
                                   dropdegenerate=False)
        ns = 1

        dircoords = image.coordinates().get_coordinate('direction')
        nx = dircoords.get_axis_size(axis=1)
        ny = dircoords.get_axis_size(axis=0)
        c = []
        c.append(image.toworld((0, 0, 0, 0)))
        c.append(image.toworld((0, 0, 0, nx)))
        c.append(image.toworld((0, 0, ny, 0)))
        c.append(image.toworld((0, 0, ny, nx)))
        c = np.array(c)
        for i in range(4):
            if c[i, 3] < 0:
                c[i, 3] += 2 * np.pi

        inc = dircoords.get_increment()
        ref = dircoords.get_referencepixel()
        val = dircoords.get_referencevalue()

        # wsclean image header is weird
        if val[1] < 0:
            val[1] += 2 * np.pi
        ra_axis = (np.arange(nx) - ref[1]) * inc[1] + val[1]
        dec_axis = (np.arange(ny) - ref[0]) * inc[0] + val[0]
        rainc.append(inc[1])
        decinc.append(inc[0])
        declims.append(np.min(c[:, 2]))
        declims.append(np.max(c[:, 2]))
        #mean_ra = np.mean(ra_axis)
        #ralims.append((min(ra_axis)-mean_ra)*np.cos(val[0])+mean_ra)
        #ralims.append((max(ra_axis)-mean_ra)*np.cos(val[0])+mean_ra)
        #raleft.append((ra_axis[0]-mean_ra)*np.cos(val[0])+mean_ra)
        #raright.append((ra_axis[-1]-mean_ra)*np.cos(val[0])+mean_ra)
        raleft.append(np.max(c[:, 3]))
        raright.append(np.min(c[:, 3]))
        print im, raleft[-1], raright[-1], rainc[-1]
        pims.append(image)
        pfcs.append(pim.image(fa))

    # Generate the mosaic coordinate frame
    if not args.NCP:
        print('Using the regular mosaic mode.')
        master_dec = np.arange(min(declims), max(declims), min(decinc))
        if max(raleft) - min(raright) > 5. * np.pi / 3.:  # crossed RA=0
            print "Warning: I think the mosaic crosses RA=0, treating the coordinates as such."
            ##ralims[ralims>np.pi] -= 2.*np.pi
            #for i in range(len(ralims)):
            #    if ralims[i]>np.pi: ralims[i] = ralims[i]-2.*np.pi
            for i in range(len(raright)):
                raright[i] = raright[i] - 2. * np.pi
        master_ra = np.arange(max(raleft), min(raright),
                              max(rainc) / (np.cos(min(declims))))
        lmra = len(master_ra)
        if args.maxwidth != 0:
            if lmra > args.maxwidth:
                xboundary = (lmra - args.maxwidth) / 2
                master_ra = master_ra[xboundary:-xboundary]
        if args.verbose:
            print "Found ra,dec pixel increments (arcsec):"
            print np.array(rainc) * 206265., np.array(decinc) * 206265.
        ma = pims[-1].coordinates()
        ma['direction'].set_referencepixel(
            [len(master_dec) / 2, len(master_ra) / 2])
        ma['direction'].set_increment([
            decinc[np.argmin(np.abs(decinc))], rainc[np.argmin(np.abs(rainc))]
        ])
        ma['direction'].set_referencevalue(
            [master_dec[len(master_dec) / 2], master_ra[len(master_ra) / 2]])
    else:
        print('Using the special NCP mosaic mode.')
        ra_width = 20. / 180 * np.pi
        dec_width = 20. / 180 * np.pi
        rainc = rainc[np.argmin(np.abs(rainc))]
        decinc = decinc[np.argmin(np.abs(decinc))]
        ra_imsize = int(ra_width / np.abs(rainc))
        dec_imsize = int(dec_width / np.abs(decinc))
        master_ra = np.arange(ra_imsize,
                              dtype=float) / ra_imsize * rainc - ra_width / 2
        master_dec = np.arange(
            dec_imsize, dtype=float) / dec_imsize * decinc - dec_width / 2
        ma = pims[-1].coordinates()
        ma['direction'].set_referencevalue([np.pi / 2, 0.])
        ma['direction'].set_increment([decinc, rainc])
        ma['direction'].set_referencepixel([dec_imsize / 2., ra_imsize / 2.])

    # Initialize the arrays for the output image, sensitivity, and weights
    print 'making output image of size', len(master_dec), 'x', len(master_ra)
    master_im = np.zeros((len(master_dec), len(master_ra)))
    master_mask = np.zeros((len(master_dec), len(master_ra)))

    # Reproject the images onto the master grid, weight and normalize
    for i in range(len(pims)):
        print 'doing image', i
        im = pims[i].regrid([2, 3],
                            ma,
                            outshape=(nc, ns, len(master_dec), len(master_ra)))
        fa = pfcs[i].regrid([2, 3],
                            ma,
                            outshape=(nc, ns, len(master_dec), len(master_ra)))
        imdata = np.squeeze(im.getdata())
        facmask = np.squeeze(fa.getdata())
        newim = imdata * facmask
        #        newpb = pbdata
        #        newwt = (weights[i]*newpb)**2
        master_im += newim
        master_mask += facmask


#        master_sens += newpb*newwt
#        master_weight += newwt

    print 'Blanking'
    blank = np.ones_like(master_im) * np.nan  # im is a pyrap image; build the NaN template from the numpy array
    master_im = np.where(master_mask, master_im, blank)
    # Show image if requested
    if args.plotimg:
        plt.imshow(master_im, vmin=0., vmax=0.5)
        plt.show()

    # Write fits files
    arrax = np.zeros((1, 1, len(master_im[:, 0]), len(master_im[0, :])))
    arrax[0, 0, :, :] = master_im

    # Open new casa image for mosaic
    new_pim = pim.image('',
                        shape=(1, 1, len(master_dec), len(master_ra)),
                        coordsys=ma)
    new_pim.putdata(arrax)
    # Write fits
    new_pim.tofits(args.outfits, overwrite=True)

    # need to add new beam info (not sure if this is possible with pyrap)
    hdu = pyfits.open(args.outfits, mode='update')
    header = hdu[0].header
    header.update('BMAJ', mean_psf_fwhm[0])
    header.update('BMIN', mean_psf_fwhm[1])
    header.update('BPA', mean_psf_fwhm[2])
    header.update('BUNIT', pims[-1].info()['unit'])
    header.update('RESTFRQ', mean_frequency)
    header.update('RESTFREQ', mean_frequency)
    newhdu = pyfits.PrimaryHDU(data=hdu[0].data, header=header)
    newhdu.writeto(args.outfits, clobber=True)

    return
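The header.update(key, value) calls above use the old pyfits interface. A minimal sketch of the same beam bookkeeping with the modern astropy.io.fits API (the function name and arguments are illustrative, not part of the original script):

import numpy as np
from astropy.io import fits

def write_beam_keywords(fitsname, bmaj_deg, bmin_deg, bpa_deg, restfrq_hz):
    # Record mean restoring-beam parameters and rest frequency in-place.
    with fits.open(fitsname, mode='update') as hdulist:
        hdr = hdulist[0].header
        hdr['BMAJ'] = bmaj_deg
        hdr['BMIN'] = bmin_deg
        hdr['BPA'] = bpa_deg
        hdr['RESTFRQ'] = restfrq_hz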
Code example #35
0
        fitsfile = imagefile
        # This should always be True for fitsimage conversions.
        input_image_origin_is_lower_left = True
    else:
        image = Image.open(imagefile)

    # Check for input FITS file with WCS.
    if not os.path.exists(fitsfile):
        print "Error: file '%s' doesn't exist" % fitsfile
        sys.exit(2)

    # Read the WCS.
    sys.stdout.write("Reading WCS from %s... " % fitsfile)
    sys.stdout.flush()
    fitslib.fits_simple_verify(fitsfile)
    fits = pyfits.open(fitsfile)
    try:
        header = fits[0].header
        wcs = wcslib.WcsProjection(header)
    finally:
        fits.close()
    sys.stdout.write("done\n")

    # Set the various warping options and warp the input image.
    projection = SkyProjection(image, wcs)
    projection.backgroundColor = (0, 0, 0, 0)
    projection.maxSideLength = max_side_length
    if input_image_origin_is_lower_left:
        projection.inputImageOrigin = ImageOrigin.LOWER_LEFT
    else:
        projection.inputImageOrigin = ImageOrigin.UPPER_LEFT
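fitslib and wcslib here are project-local helpers. A rough astropy equivalent of the WCS-reading step, as a sketch only:

from astropy.io import fits
from astropy import wcs

def read_wcs(fitsfile):
    # Read the primary-header WCS, mirroring the fitslib/wcslib step above.
    with fits.open(fitsfile) as hdulist:
        return wcs.WCS(hdulist[0].header)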
Code example #36
0
File: minimize_gaia.py Project: bxy8804/QuickReduce
import os
import sys
import pyfits

if __name__ == "__main__":

    for fn in sys.argv[1:]:

        out_fn = fn[:-5] + ".mini.fits"
        if (os.path.isfile(out_fn)):
            continue

        print("Minimizing %s to %s" % (fn, out_fn))

        hdulist = pyfits.open(fn)

        col_names = ['ra', 'dec', 'ra_error', 'dec_error', 'phot_g_mean_mag']

        flux = hdulist[1].data.field('phot_g_mean_flux')
        flux_error = hdulist[1].data.field('phot_g_mean_flux_error')
        mag_error = flux_error / flux
        mag_error[(flux <= 0) | (flux_error <= 0)] = -99.9

        mas_to_deg = (1 / 1000.) / 3600.
        columns = [
            pyfits.Column(name='ra',
                          format='D',
                          unit='Angle[deg]',
                          array=hdulist[1].data.field('ra')),
            pyfits.Column(name='dec',
                          format='D',
                          unit='Angle[deg]',
                          array=hdulist[1].data.field('dec')),
            # the listing is truncated here; the remaining columns are
            # reconstructed to mirror col_names above, with positional
            # errors converted from mas to degrees
            pyfits.Column(name='ra_error',
                          format='D',
                          unit='Angle[deg]',
                          array=hdulist[1].data.field('ra_error') * mas_to_deg),
            pyfits.Column(name='dec_error',
                          format='D',
                          unit='Angle[deg]',
                          array=hdulist[1].data.field('dec_error') * mas_to_deg),
            pyfits.Column(name='phot_g_mean_mag',
                          format='D',
                          unit='mag',
                          array=hdulist[1].data.field('phot_g_mean_mag')),
        ]

        # write the minimized catalog (mag_error computed above would
        # typically be stored alongside the magnitudes)
        tbhdu = pyfits.new_table(pyfits.ColDefs(columns))
        tbhdu.writeto(out_fn, clobber=True)
        hdulist.close()
Code example #37
0
def fwhm(incat):
    """
    Get the median FWHM and ELLIPTICITY from the scamp catalog (incat)
    """
    CLASSLIM = 0.75  # class threshold to define star
    MAGERRLIMIT = 0.1  # mag error threshold for stars

    if debug:
        print "!!!! WUTL_STS: (fwhm): Opening scamp_cat to calculate median FWHM & ELLIPTICITY.\n"
    hdu = pyfits.open(incat, "readonly")

    if debug:
        print "!!!! WUTL_STS: (fwhm): Checking to see that hdu2 in scamp_cat is a binary table.\n"
    if 'XTENSION' in hdu[2].header:
        if hdu[2].header['XTENSION'] != 'BINTABLE':
            print "!!!! WUTL_ERR: (fwhm): this HDU is not a binary table"
            exit(1)
    else:
        print "!!!! WUTL_ERR: (fwhm): XTENSION keyword not found"
        exit(1)

    if 'NAXIS2' in hdu[2].header:
        nrows = hdu[2].header['NAXIS2']
        print "!!!! WUTL_INF: (fwhm): Found %s rows in table" % nrows
    else:
        print "!!!! WUTL_ERR: (fwhm): NAXIS2 keyword not found"
        exit(1)

    tbldct = {}
    for colname in [
            'FWHM_IMAGE', 'ELLIPTICITY', 'FLAGS', 'MAGERR_AUTO', 'CLASS_STAR'
    ]:
        if colname in hdu[2].columns.names:
            tbldct[colname] = hdu[2].data.field(colname)
        else:
            print "!!!! WUTL_ERR: (fwhm): No %s column in binary table" % colname
            exit(1)

    hdu.close()

    flags = tbldct['FLAGS']
    cstar = tbldct['CLASS_STAR']
    mgerr = tbldct['MAGERR_AUTO']
    fwhm = tbldct['FWHM_IMAGE']
    ellp = tbldct['ELLIPTICITY']

    fwhm_sel = []
    ellp_sel = []
    count = 0
    for i in range(nrows):
        if flags[i] < 1 and cstar[i] > CLASSLIM and mgerr[
                i] < MAGERRLIMIT and fwhm[i] > 0.5 and ellp[i] >= 0.0:
            fwhm_sel.append(fwhm[i])
            ellp_sel.append(ellp[i])
            count += 1

    fwhm_sel.sort()
    ellp_sel.sort()

    # allow the no-stars case count = 0 to proceed without crashing
    if count <= 0:
        fwhm_med = 4.0
        ellp_med = 0.0
    else:
        if count % 2:
            # Odd number of elements
            fwhm_med = fwhm_sel[count / 2]
            ellp_med = ellp_sel[count / 2]
        else:
            # Even number of elements
            fwhm_med = 0.5 * (fwhm_sel[count / 2] + fwhm_sel[count / 2 - 1])
            ellp_med = 0.5 * (ellp_sel[count / 2] + ellp_sel[count / 2 - 1])

    if debug:
        print "FWHM=%.4f" % fwhm_med
        print "ELLIPTIC=%.4f" % ellp_med
        print "NFWHMCNT=%s" % count

    return (fwhm_med, ellp_med, count)
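For reference, the star selection and medians computed above can be expressed with numpy in a few lines; a sketch assuming the same column arrays and cuts:

import numpy as np

def fwhm_numpy(fwhm, ellp, flags, cstar, mgerr,
               classlim=0.75, magerrlimit=0.1):
    # Vectorized version of the selection and medians computed above.
    sel = (flags < 1) & (cstar > classlim) & (mgerr < magerrlimit) & \
          (fwhm > 0.5) & (ellp >= 0.0)
    count = int(np.sum(sel))
    if count == 0:
        return 4.0, 0.0, 0  # same no-stars fallback as above
    return np.median(fwhm[sel]), np.median(ellp[sel]), count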
Code example #38
0
def buildDthProduct(pardict, output, extlist=None, outlist=None, wcs=None):
    """ 
    Parameters:
        pardict - a dictionary containing (at least):
            'data','outdata','outweight','outcontext'
            where 'data' serves as the name of the backup template image,
                and the others are filenames of the drizzle products.
        output - filename of the final combined output image
        extlist - list of EXTNAME's to be searched in the template image
        outlist - list of EXTNAME's to be used for naming the output extensions
                  
    This function will package the two or three output files from
    'drizzle' or 'tdrizzle' into a single output multi-extension 
    FITS file.
    It uses a pre-existing multi-extension FITS file as a template
    for the output product. 
    The list 'extlist' contains the names of the extensions from the
    template image which will be used to build the output file. 
    
    A number of keywords are also updated based on values from the
    drizzle products, including the CD matrix, CRPIX[1,2], CRVAL[1,2].
    In addition, the template images may differ in size from the
    drizzle products; this is handled automatically by PyFITS.
    
    NOTE:
    The output file will ALWAYS contain 'SCI','WHT', and 'CTX' 
    extensions.
    """
    # Set up default extlist list
    if extlist == None:
        extlist = ('SCI', 'ERR', 'DQ')
    if outlist == None:
        outlist = ('SCI', 'WHT', 'CTX')

    # Get default headers from multi-extension FITS file
    # If input data is not in MEF FITS format, it will return 'None'
    # and those headers will have to be generated from drizzle output
    # file FITS headers.
    # NOTE: These are HEADER objects, not HDUs
    prihdr, scihdr, errhdr, dqhdr = getTemplates(output, pardict['data'],
                                                 extlist)

    if prihdr == None:
        # Open FITS image and use its Primary header
        fpri = pyfits.open(pardict['outdata'])
        prihdr = pyfits.Header(cards=fpri[0].header.ascard.copy())
        fpri.close()
        del fpri

    # Setup primary header as an HDU ready for appending to output FITS file
    prihdu = pyfits.PrimaryHDU(header=prihdr, data=None)

    # Start by updating PRIMARY header keywords...
    prihdu.header.update('EXTEND', pyfits.TRUE)
    prihdu.header.update('NEXTEND', 3)
    prihdu.header.update('FILENAME', output)

    # Open the dither output SCI image
    fsci = pyfits.open(pardict['outdata'])

    # Get the total exposure time for the image
    # If not calculated by PyDrizzle and passed through
    # the pardict, then pulled from the template image.
    inhdr = fsci[0].header

    if pardict.has_key('texptime'):
        _exptime = pardict['texptime']
        _expstart = pardict['expstart']
        _expend = pardict['expend']
    else:
        _exptime = inhdr['EXPTIME']
        _expstart = inhdr['EXPSTART']
        _expend = inhdr['EXPEND']

    prihdu.header.update('EXPTIME', _exptime)
    prihdu.header.update('TEXPTIME', _exptime)
    prihdu.header.update('EXPSTART', _expstart)
    prihdu.header.update('EXPEND', _expend)

    # Update DITHCORR calibration keyword if present
    # Remove when we can modify FITS headers in place...
    if prihdu.header.has_key('DITHCORR') > 0:
        prihdu.header['DITHCORR'] = 'COMPLETE'

    # Now, build the output file
    fo = pyfits.open(output, 'append')
    # Add primary header to output file...
    fo.append(prihdu)

    # Now, build SCI extension HDU
    if scihdr == None:
        scihdr = inhdr

    if wcs != None:
        # Update ORIENTAT based on PyDrizzle product's value
        # since 'drizzle' itself doesn't update that keyword.
        scihdr.update('ORIENTAT', wcs.orient)
    # Does this need to be explicitly created or can the pre-existing
    # one simply be appended?
    #
    hdu = pyfits.ImageHDU(data=fsci[0].data, header=scihdr, name=outlist[0])
    updateDTHKeywords(hdu.header, fsci[0].header, output)

    fo.append(hdu)
    fo.flush()

    fsci.close()
    del fsci[0].data
    del fsci
    del hdu

    # Write out the WEIGHT image in the ERR array extension
    fweight = pyfits.open(pardict['outweight'])
    if errhdr == None:
        errhdr = fweight[0].header
    hdu = pyfits.ImageHDU(data=fweight[0].data, header=errhdr, name=outlist[1])
    updateDTHKeywords(hdu.header, fweight[0].header, output)

    fo.append(hdu)
    fo.flush()

    fweight.close()
    del fweight[0].data
    del fweight
    del hdu

    # Write out the Context image (if any was created)
    cfile = pardict['outcontext']
    _ctx = yes
    if findFile(cfile):
        fctx = pyfits.open(pardict['outcontext'])
        if dqhdr == None:
            dqhdr = fctx[0].header
        hdu = pyfits.ImageHDU(data=fctx[0].data, header=dqhdr, name=outlist[2])
        updateDTHKeywords(hdu.header, fctx[0].header, output)

    else:
        _ctx = no

        # Use the SCI HDU for the shape and build a default array
        imarr = N.ones(shape=fo[1].data.shape, dtype=N.int16)
        if dqhdr == None:
            dqhdr = scihdr
            dqhdr.update('EXTVER', scihdr['EXTVER'])

        hdu = pyfits.ImageHDU(data=imarr, header=dqhdr, name=outlist[2])
        print 'Dither Product: writing out empty context extension.'

    fo.append(hdu)
    # Close the output and template file
    print 'Finished creating FINAL dither product ', output

    fo.close()
    del fo[1].data
    del fo
    if _ctx:
        fctx.close()
        del fctx[0].data
        del fctx
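For reference, the multi-extension product assembled above has the following layout with the default outlist:

# Output structure with the default outlist ('SCI', 'WHT', 'CTX'):
#   [0] PRIMARY - copied primary header (EXTEND/NEXTEND/FILENAME updated)
#   [1] SCI     - drizzled science image from pardict['outdata']
#   [2] WHT     - weight image from pardict['outweight']
#   [3] CTX     - context image from pardict['outcontext'], or an
#                 all-ones int16 placeholder when no context file exists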
Code example #39
0
def OBSOLETE_readIDCtab(tabname, chip=1, direction='forward'):

    try:
        ftab = pyfits.open(tabname)
    except:
        raise IOError, "IDC table '%s' not valid as specified!" % tabname

    #First thing we need, is to read in the coefficients from the IDC
    # table and populate the Fx and Fy matrices.

    # Read FITS header to determine order of fit, i.e. k
    order = ftab['PRIMARY'].header['NORDER']

    fx = N.zeros(shape=(order + 1, order + 1), dtype=N.float32)
    fy = N.zeros(shape=(order + 1, order + 1), dtype=N.float32)

    #Determine row from which to get the coefficients.
    # How many rows do we have in the table...
    fshape = ftab[1].data.shape
    colnames = ftab[1].data._names
    row = -1

    # Loop over all the rows looking for the one which corresponds
    # to the value of CCDCHIP we are working on...
    for i in xrange(fshape[0]):
        if 'DETCHIP' in colnames:
            detchip = ftab[1].data.field('DETCHIP')[i]
        else:
            detchip = 1

# Below is the pydrizzle 3.0 solution to the problem of SBC idctabs
# having a DETCHIP of -999.  Better would be to explicitly set the
# detchip to -999 if the detector is 'SBC'.
# Below that is the old, legacy pydrizzle 2.6 code.

        if 'DIRECTION' in colnames:
            direct = string.lower(ftab[1].data.field('DIRECTION')[i])
        else:
            direct = 'forward'

        if string.strip(direct) == string.strip(direction):
            if int(detchip) == int(chip) or int(detchip) == -999:
                row = i
                break


#         if 'DIRECTION' in colnames:
#             direct = string.lower(ftab[1].data.field('DIRECTION')[i])
#         else:
#             raise LookupError,'fileutil: readIDCtab did not find valid DIRECTION'

#         if (string.strip(direct) == string.strip(direction)) and (int(detchip) == int(chip)):
#                 row = i
#                 print '_readIDCtab breaking on row ', i, 'detchip,direction: ', detchip, direct
#                 break

    if row < 0:
        print 'Row corresponding to DETCHIP of ', chip, ' was not found!'
        raise LookupError

    refpix = {}
    refpix['XREF'] = ftab[1].data.field('XREF')[row]
    refpix['YREF'] = ftab[1].data.field('YREF')[row]
    refpix['XSIZE'] = ftab[1].data.field('XSIZE')[row]
    refpix['YSIZE'] = ftab[1].data.field('YSIZE')[row]
    refpix['PSCALE'] = ftab[1].data.field('SCALE')[row]
    refpix['V2REF'] = ftab[1].data.field('V2REF')[row]
    refpix['V3REF'] = ftab[1].data.field('V3REF')[row]
    refpix['XDELTA'] = 0.0
    refpix['YDELTA'] = 0.0
    refpix['centered'] = no

    # Now that we know which row to look at, read coefficients into the
    #	numeric arrays we have set up...
    # Setup which column name convention the IDCTAB follows
    # either: A,B or CX,CY
    if 'CX10' in ftab[1].data._names:
        cxstr = 'CX'
        cystr = 'CY'
    else:
        cxstr = 'A'
        cystr = 'B'

    for i in xrange(order + 1):
        if i > 0:
            for j in xrange(i + 1):
                xcname = cxstr + str(i) + str(j)
                ycname = cystr + str(i) + str(j)
                fx[i, j] = ftab[1].data.field(xcname)[row]
                fy[i, j] = ftab[1].data.field(ycname)[row]

    ftab.close()
    del ftab

    # Return arrays and polynomial order read in from table.
    # NOTE: XREF and YREF are stored in Fx,Fy arrays respectively.
    return fx, fy, refpix, order
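The fx/fy matrices read above feed pydrizzle's distortion evaluation elsewhere. A sketch of how such coefficient matrices are conventionally applied (the polynomial convention is an assumption, not taken from this listing):

import numpy as np

def apply_idc(fx, fy, x, y, order):
    # Evaluate the distortion polynomial; assumes the usual drizzle
    # convention in which fx[i, j] multiplies x**j * y**(i - j).
    xc, yc = 0.0, 0.0
    for i in range(order + 1):
        for j in range(i + 1):
            xc += fx[i, j] * x**j * y**(i - j)
            yc += fy[i, j] * x**j * y**(i - j)
    return xc, yc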
Code example #40
0
def getTemplates(oname, tname, extlist):
    # Obtain default headers for output file
    # If the output file already exists, use it
    # If not, use an input file for this information.
    #
    # NOTE: Returns 'pyfits.Header' objects, not HDU objects!
    #
    fname = None
    if checkFileExists(oname):
        fname = oname
    else:
        fname = tname

    if fname != None and string.find(fname, '.fits') > 0:
        # Open a calibrated ACS image as a template
        _indx = string.find(tname, '[')
        if _indx > 0:
            template = tname[:_indx]
        else:
            template = tname
        ftemplate = pyfits.open(template)

        # Setup which keyword we will use to select each
        # extension...
        _extkey = 'EXTNAME'

        #
        # Now, extract the headers necessary for output (as copies)
        # 1. Find the SCI extension in the template image
        # 2. Make a COPY of the extension header for use in new output file
        prihdr = pyfits.Header(cards=ftemplate['PRIMARY'].header.ascard.copy())
        extnum = findKeywordExtn(ftemplate, _extkey, extlist[0])
        scihdr = pyfits.Header(cards=ftemplate[extnum].header.ascard.copy())

        extnum = findKeywordExtn(ftemplate, _extkey, extlist[1])
        errhdr = pyfits.Header(cards=ftemplate[extnum].header.ascard.copy())
        extnum = findKeywordExtn(ftemplate, _extkey, extlist[2])
        dqhdr = pyfits.Header(cards=ftemplate[extnum].header.ascard.copy())

        ftemplate.close()
        del ftemplate

    else:
        # Create default headers from scratch
        prihdr = None
        scihdr = None
        errhdr = None
        dqhdr = None

    # Now, safeguard against having BSCALE and BZERO
    try:
        del scihdr['bscale']
        del scihdr['bzero']
        del errhdr['bscale']
        del errhdr['bzero']
        del dqhdr['bscale']
        del dqhdr['bzero']
    except:
        # If these don't work, they didn't exist to start with...
        pass

    # At this point, check errhdr and dqhdr to make sure they
    # have all the requisite keywords (as listed in updateDTHKeywords).
    # Simply copy them from scihdr if they don't exist...
    if errhdr != None and dqhdr != None:
        for keyword in DTH_KEYWORDS:
            if not errhdr.has_key(keyword):
                errhdr.update(keyword, scihdr[keyword])
            if not dqhdr.has_key(keyword):
                dqhdr.update(keyword, scihdr[keyword])

    return prihdr, scihdr, errhdr, dqhdr
Code example #41
0
# imports assumed by this snippet ('py' is pyfits; the original import lines are not shown)
import numpy as np
import pyfits as py
from scipy.interpolate import interp1d

#######################################################################################

##################################################################
# GET redshift - time conversion
######
# F737 Cosmology
F737 = 'Redshift_time_F737_cosmology.fits'
# PLanck cosmology
FPlanck = 'Redshift_time_FPlanck_cosmology.fits'

# Which Cosmology to use:
USING_NOW = FPlanck

#LOAD Redshift-time dependence
A = py.open(USING_NOW)
NOW = A[1].data
#LOAD GENERAL PROPERTIES - what will be plotted
Redshift = NOW.field('Redshift')
Time = NOW.field('Time_Gyr')
# time to redshift relation
t_to_z = interp1d(Time, Redshift, kind='linear', bounds_error=None)

############################################################
# Integration limits and conversion z to time
#
ts = np.linspace(
    0.25, 13.4, 60000
)  # Produce time grid  - 50000 steps seems to be a good minimum for convergence
zs = t_to_z(ts)
t0 = 0.0
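A quick check of the interpolant once the table is loaded (the value depends on the chosen cosmology file):

print(t_to_z(5.0))  # redshift at a cosmic time of 5 Gyr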
Code example #42
0
def readAsnTable(fname, output, prodonly=yes):
    """
    This function reads the filenames/rootnames and shifts from a FITS
    ASN table.

    Column names expected are:
      MEMNAME  - rootname of each member
      MEMTYPE  - type of member in association (PROD-* or EXP-*)
      XOFFSET  - shift in X for this observation
      YOFFSET  - shift in Y for this observation
      ROTATION - additional rotation to be applied
      SCALE    - scale image by this value

    This will return a nested dictionary corresponding to this
    association table.
    Observation dictionary: {'xsh':0.,'ysh':0.,'rot':0.,'scale':1.}
    Product dictionary: {'output':type, 'memname1':dict, 'memname2':dict,...}
        where each dict is an Observation dictionary.
    The dictionary using EXP* will be:
      p = {'output':'dthname',
           'members':{'name1':{'xsh':0.,...}, 'name2':{'xsh':0.,...}, ...}}
    You get a list of input names using 'p['members'].keys()'.

    Parameters:
        output:   desired name of the output image (None, 'EXP', or a user filename)
        prodonly: yes - only select MEMTYPE=PROD* as input observations
                  no  - use MEMTYPE=EXP* as input observations

    Output: If none is specified by the user, the first 'PROD-DTH' filename
        found in the ASN table will be used.  If there is no 'PROD-DTH'
        entry, the first 'PROD' entry will be used instead.  Finally, if
        'output' = 'EXP', the value 'EXP' will be passed along and
        interpreted as a switch to use the input filename as the output,
        resulting in every input creating a separate output.
    """
    # Initialize this dictionary for output
    asndict = {'output': None, 'members': {}}

    #print "Read ASN table"
    #pdb.set_trace()
    # Open the table...
    try:
        ftab = pyfits.open(fname)
    except:
        raise IOError, "Association table '%s' not valid as specified!" % fname

    tablen = ftab[1].data.shape[0]
    colnames = ftab[1].data._names
    # Set a flag to specify whether ASN has a PROD-DTH member
    dthprod = no
    # Now, put it together with rootname and type...
    for row in xrange(tablen):
        # Read in required columns for each row
        if 'MEMNAME' in colnames and 'MEMTYPE' in colnames:
            # We need to make sure no EOS characters remain part of
            # the strings we read out...
            mname = string.split(ftab[1].data.field('MEMNAME')[row], '\0',
                                 1)[0]
            mtype = string.split(ftab[1].data.field('MEMTYPE')[row], '\0',
                                 1)[0]
            memname = string.strip(mname)
            memtype = string.strip(mtype)
            memrow = row
        else:
            print 'Association table incomplete: required column(s) MEMNAME/MEMTYPE NOT found!'
            raise LookupError

        # Do we care about this entry?
        # Entries that should be used to build DTH product are:
        #  PROD-RPT, PROD-CRJ, EXP-DTH
        if string.find(memtype, 'PROD') < 0 and string.find(
                memtype, 'EXP-DTH') < 0:
            if prodonly == yes:
                # We are looking at an EXP* entry we don't want...
                continue

        memdict = {}
        # Keep track of which order they were read in by their row number
        memdict['row'] = memrow
        memdict['xoff'] = 0.
        memdict['yoff'] = 0.
        memdict['rot'] = 0.
        # Read in optional data from columns
        # X offset
        if 'XOFFSET' in colnames:
            memdict['delta_x'] = ftab[1].data.field('XOFFSET')[row]
        else:
            memdict['delta_x'] = 0.

        # Y offset
        if 'YOFFSET' in colnames:
            memdict['delta_y'] = ftab[1].data.field('YOFFSET')[row]
        else:
            memdict['delta_y'] = 0.

        # Rotation angle
        if 'ROTATION' in colnames:
            memdict['delta_rot'] = ftab[1].data.field('ROTATION')[row]
        else:
            memdict['delta_rot'] = 0.

        # Scale: output pixel size
        if 'SCALE' in colnames:
            memdict['scale'] = ftab[1].data.field('SCALE')[row]
        else:
            memdict['scale'] = 1.

        # Build the shifts dictionary now...
        if string.find(memtype, 'PROD') < 0 and prodonly == no:
            # We want to use this EXP* entry.
            asndict['members'][memname] = memdict
        elif memtype == 'PROD-DTH':
            # We have found a dither product specified in ASN
            # Not to be used for input, but
            # has one already been specified as the final output?
            if dthprod == no:
                if output == None:
                    # Use default output name
                    asndict['output'] = memname
                else:
                    # Use user-specified output name here
                    asndict['output'] = output
                dthprod = yes
        else:
            # We are working with a PROD* entry...
            if prodonly == yes:
                asndict['members'][memname] = memdict

        # Set up a default output filename
        # This will be overwritten by a different output
        # name if a PROD-DTH entry is found in the ASN table
        # and 'output' was not specified by the user.
        # Useful for CR-SPLIT/REPEAT-OBS ASN tables.
        if asndict['output'] == None:
            if output == None:
                asndict['output'] = memname
            else:
                asndict['output'] = output

    # Finished reading all relevant rows from table
    ftab.close()
    del ftab

    return asndict
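For illustration, the returned structure for a two-member association would look like this (member names and shift values are made up):

example_asndict = {
    'output': 'final_dth',
    'members': {
        'obs1': {'row': 0, 'xoff': 0., 'yoff': 0., 'rot': 0.,
                 'delta_x': 0.5, 'delta_y': -0.3, 'delta_rot': 0., 'scale': 1.},
        'obs2': {'row': 1, 'xoff': 0., 'yoff': 0., 'rot': 0.,
                 'delta_x': 0.0, 'delta_y': 0.0, 'delta_rot': 0., 'scale': 1.},
    },
}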
Code example #43
0
def do_one_file(dirname, fitsfile, run, ccd, plate, ndfclass_updated, cob_id,
                runccd_id, run_id, motherfolder, n, N):
    print n, "/", N,
    print " + Creating table for file", str(dirname) + "/" + fitsfile
    out_storage = []
    i = (dirname, fitsfile, run, ccd, plate, ndfclass_updated, cob_id,
         runccd_id, run_id)
    path = find_file(i[0], i[1], i[3], motherfolder)
    if path == None:
        print " - No file found for %s/%s" % (i[0], i[1])
        return out_storage  # avoid crashing on pyfits.open(None) below

    hdulist = pyfits.open(path)
    fdata = hdulist["STRUCT.MORE.FIBRES"].data
    name = fdata["NAME"]
    ra = fdata["RA"]
    de = fdata["DEC"]
    x = fdata["X"]
    y = fdata["Y"]
    xe = fdata["XERR"]
    ye = fdata["YERR"]
    tp = fdata["TYPE"]
    pivot = fdata["PIVOT"]
    mag = fdata["MAGNITUDE"]
    comment = fdata["COMMENT"]
    theta = fdata["THETA"]
    pmra = fdata["PMRA"]
    pmdec = fdata["PMDEC"]
    pid = fdata["PID"]
    retractor = fdata["RETRACTOR"]
    wlen = fdata["WLEN"]
    hdulist.close()

    h0 = pyfits.getheader(path, 0)
    utdate = h0["UTDATE"]
    utstart = h0["UTSTART"]
    zenithh = h0["ZDSTART"]

    for j in range(1, 401):

        #change coordinates into degrees
        ra_ins = np.degrees(ra[j - 1])
        de_ins = np.degrees(de[j - 1])

        #fix observations type
        tp_ins = tp[j - 1]
        if tp_ins == '.': tp_ins = 'P'

        #calculate airmass
        zenith = zenith_distance(utdate, utstart, ra_ins, de_ins)
        airmass = 1.0 / np.cos(np.radians(float(zenith)))

        #calculate barycentric velocity
        barycentric, heliocentric = sol_corrections(ra_ins, de_ins, h0)
        barycentric = barycentric.to("km/s").value
        heliocentric = heliocentric.to("km/s").value

        galahic = find_galah_id(name[j - 1])
        outname = ((i[0] * 10000 + i[2]) * 100000 +
                   pivot[j - 1]) * 10 + i[3]  #[date][run][algo][pivot][ccd]
        #print i[0],i[1],i[2],i[3],i[4],i[5],i[6],pivot[j-1],j,tp_ins,ra_ins,de_ins,x[j-1],y[j-1],xe[j-1],ye[j-1],name[j-1],comment[j-1],mag[j-1],galahic, outname, airmass, barycentric, heliocentric
        out_storage.append(
            (i[7], i[8], pivot[j - 1], j, tp_ins, ra_ins, de_ins, x[j - 1],
             y[j - 1], xe[j - 1], ye[j - 1], theta[j - 1], name[j - 1],
             comment[j - 1], mag[j - 1], pmra[j - 1], pmdec[j - 1],
             round(pid[j - 1], 12), retractor[j - 1], round(wlen[j - 1], 12),
             galahic, outname, airmass, barycentric, heliocentric))
        #cur.execute("insert into objects values (%s, '%s', %s, %s, %s, '%s', %s, %s, %s, '%s', %s, %s, %s, %s, %s, %s, '%s', '%s', %s, %s, %s, %s, %s, %s)" % (i[0],i[1],i[2],i[3],i[4],i[5],i[6],pivot[j-1],j,tp_ins,ra_ins,de_ins,x[j-1],y[j-1],xe[j-1],ye[j-1],name[j-1],comment[j-1],mag[j-1],galahic, outname, airmass, barycentric, heliocentric))
    #con.commit()
    return out_storage
Code example #44
0
def fits_open(file_in):
    Open_C = pyfits.open(file_in)
    Fits_catalogue = Open_C[1].data
    return Fits_catalogue
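The HDUList opened above is never closed. A variant that releases the file handle once the table has been read (a sketch; it forces the data to load before closing):

def fits_open_closed(file_in):
    # Read the first table extension, then close the underlying file.
    hdulist = pyfits.open(file_in)
    try:
        return hdulist[1].data  # touching .data loads it into memory
    finally:
        hdulist.close()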
Code example #45
0
#!/usr/local/bin/python

import numpy as np
import pyfits
from pyslalib import slalib
import pywcs
import math
from math import *
import re
from glob import glob

degtorad = math.pi / 180

fname = 'pnS005-cheese.fits'  # avoid shadowing the builtin 'file'
hdulist = pyfits.open(fname)
cra = hdulist[0].header['CRVAL1']  # cra = central ra
cdec = hdulist[0].header['CRVAL2']  # cdec = central dec
hdulist.close()

fname = 'CDFS4MS_REVISED.fits'
f = open(fname)
lines = f.readlines()
f.close()

chandra_ra = []
chandra_dec = []
chandra_r = []
chandra_theta = []
chandra_type = []
chandra_flux = []
Code example #46
0
File: COSNUVCalib.py Project: RainW7/SamPy
def get_NUV_PSA_WCA(psalist, wcalist, scale=False, width=512, ishift=1, extrakeys=False, debug=False):
    """
     Does a cross-correlation between a pair of x1d files  containing the PSA and WCA spectra for the same central wavelegth.

     input:
     psalist - a list containing filenames of the x1d spectra for the PSA
     wcalist - a list containing filenames of the x1d spectra for the WCA

     optional input:
     scale - whether wca spectrum is multiplied with boxcar smoothing factor of the
             ratio between psa and wca spectrum
     ishift - guess of the intial shift in pixels, int
     width - width of the search area in pixels, int
     returns:
     a list with central wavelengths, stripes, and calculated offset values.

     """
    if scale: from numarray.convolve import boxcar as bc
    if extrakeys: import glob

    lpsa = len(psalist)
    lwca = len(wcalist)

    result = []

    if debug: print '%i and %i PSA and WCA files will be processed, respectively' % (lpsa, lwca)

    if lpsa != lwca:
        print 'The lists of filenames do not have the same number of elements.'
        print 'psalist has %i elements while wcalist has %i' % (lpsa, lwca)
        print 'Will exit now...'
        sys.exit(-1)

    for psafile, wcafile in zip(psalist, wcalist):
        if debug: print 'Running files %s and %s' % (psafile, wcafile)

        try:
            #psadata, psahdr = pf.getdata(psafile, header = True)
            #wcadata, wcahdr = pf.getdata(wcafile, header = True)
            #Above did not return the whole header for some reason?
            psa = pf.open(psafile)
            wca = pf.open(wcafile)
            psahdr = psa[0].header
            wcahdr = wca[0].header
            psadata = psa[1].data
            wcadata = wca[1].data
            psa.close()
            wca.close()
        except:
            print 'Error while reading data...'

        if extrakeys:
            try:
                #path = '/Volumes/cos/PreLaunch/Data/TV06/FITS/Test_Processing/Jan_15_2009_fixed/'
                #spt = path + psafile[:21] + '_spt.fits'
                path = '/Volumes/cos/PreLaunch/Data/TV03/FITS/Test_Processing/Jan_05_2009/'
                spt = path + psafile[50:-19] + '_spt.fits'
                sptlist = pf.open(spt)
                spthdr = sptlist[2].header
                sptlist.close()
            except:
                print 'Error while opening %s file...' % spt

        cenwav = psahdr['CENWAVE']
        stripe = psahdr['SEGMENT']
        grating = psahdr['OPT_ELEM']
        fppos = psahdr['FPPOS']
        psay = psadata[0][1]
        wcay = wcadata[0][1]

        ldstp = -999.
        ldvdt = -999.
        lxstp = -999.
        lxvdt = -999.
        if extrakeys:
            try:
                ldstp = spthdr['LAPDSTP']
                ldvdt = spthdr['LAPDLVDT']
                lxstp = spthdr['LAPXSTP']
                lxvdt = spthdr['LAPXLVDT']
            except:
                print 'Error while reading extra keys...'

        if cenwav != wcahdr['CENWAVE']:
            print 'Error - PSA and WCA files are not at same CENWAVE'
            print 'Will skip the files'
            continue

        if stripe != wcahdr['SEGMENT']:
            print 'Error - PSA and WCA files are not from the same STRIPE'
            print 'Will skip the files'
            continue

        if debug: print 'Processing the central wavelength of %i Angstroms' % cenwav
        if debug: print 'Processing the %s segment' % stripe

        if scale:
            mpsay = max(bc(psay, (5,)))
            mwcay = max(bc(wcay, (5,)))
            factor = mpsay / mwcay
            wcay *= factor
            print 'Boxcar smoothing for psa: %s and wca %s' % (mpsay, mwcay)

        #correlation:
        #correlation2 = correlate(psay, wcay, mode = conv.VALID)
        #correlation2 = correlate(psay, wcay, mode = conv.FULL)
        #correlation2 = correlate(psay, wcay)
        #VALID gives the same result as this
        #t = Numeric.cross_correlate(psay, wcay)

        offs, correlation = SpectrumOffset(psay, wcay, width=width, i1=ishift)

        if debug: print 'Correlation: %s' % correlation
        if debug: print 'Offset %8.6f found' % offs

        #NOTE:
        #there is a minus sign in front of offs
        #fix this if using properly calibrated data!!!

        if extrakeys:
            result.append([cenwav, stripe, -offs, psafile, grating, fppos, ldstp, ldvdt, lxstp, lxvdt])
        else:
            result.append([cenwav, stripe, -offs, psafile, grating, fppos])

    return result
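SpectrumOffset itself is not part of this listing. A minimal cross-correlation sketch of what such a routine computes, assuming it returns (offset, peak correlation):

import numpy as np

def spectrum_offset_sketch(psay, wcay, width=512, i1=1):
    # Cross-correlate two 1-D spectra over lags centred on the initial
    # guess i1 and return the best-fitting lag and its correlation value.
    lags = np.arange(i1 - width // 2, i1 + width // 2 + 1)
    cc = np.array([np.sum(psay * np.roll(wcay, lag)) for lag in lags])
    best = int(np.argmax(cc))
    return lags[best], cc[best]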
Code example #47
0
def keptrim(infile, outfile, kepid, column, row, imsize, clobber, verbose,
            logfile, status):

    # startup parameters

    status = 0

    # log the call

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPTRIM -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'kepid=' + str(kepid) + ' '
    call += 'column=' + str(column) + ' '
    call += 'row=' + str(row) + ' '
    call += 'imsize=' + str(imsize) + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

    # start time

    kepmsg.clock('KEPTRIM started at', logfile, verbose)

    # test log file

    logfile = kepmsg.test(logfile)

    # clobber output file

    if clobber: status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPTRIM: ' + outfile + ' exists. Use --clobber'
        status = kepmsg.err(logfile, message, verbose)

# open input file

    status = 0
    instr = pyfits.open(infile, mode='readonly', memmap=True)
    cards0 = instr[0].header.cards
    cards1 = instr[1].header.cards
    cards2 = instr[2].header.cards

    # fudge non-compliant FITS keywords with no values

    if status == 0:
        instr = kepkey.emptykeys(instr, infile, logfile, verbose)

# identify the season of observation

    if status == 0:
        try:
            season = cards0['SEASON'].value
        except:
            season = 0

# retrieve column and row from KIC

        try:
            kic = FOVKepID(str(kepid))
            column = int(kic[98 + season * 5])
            row = int(kic[97 + season * 5])
        except:
            pass

# convert CCD column and row to image column and row

    if status == 0:
        if imsize % 2 == 0: imsize += 1
        crpix1p = cards2['CRPIX1P'].value
        crpix2p = cards2['CRPIX2P'].value
        crval1p = cards2['CRVAL1P'].value
        crval2p = cards2['CRVAL2P'].value
        cdelt1p = cards2['CDELT1P'].value
        cdelt2p = cards2['CDELT2P'].value
        imcol = (column - crval1p) * cdelt1p + crpix1p - 1
        imrow = (row - crval2p) * cdelt2p + crpix2p - 1
        crval1p = column - imsize / 2 + 0.5
        crval2p = row - imsize / 2 + 0.5

# check subimage is contained inside the input image

    if status == 0:
        naxis1 = cards2['NAXIS1'].value
        naxis2 = cards2['NAXIS2'].value
        # integer pixel bounds of the subimage (float indices would break
        # the array slicing below)
        x1 = int(imcol - imsize / 2 + 0.5)
        x2 = x1 + imsize
        y1 = int(imrow - imsize / 2 + 0.5)
        y2 = y1 + imsize
        if x1 < 0 or y1 < 0 or x2 > naxis1 or y2 > naxis2:
            message = 'ERROR -- KEPTRIM: Requested pixel area falls outside of the pixel image in file ' + infile
            message += '. Make the pixel area smaller or relocate its center.'
            status = kepmsg.err(logfile, message, verbose)

# time series data

    if status == 0:
        time = instr[1].data.field('TIME')[:]
        timecorr = instr[1].data.field('TIMECORR')[:]
        cadenceno = instr[1].data.field('CADENCENO')[:]
        raw_cnts = instr[1].data.field('RAW_CNTS')[:]
        flux = instr[1].data.field('FLUX')[:]
        flux_err = instr[1].data.field('FLUX_ERR')[:]
        flux_bkg = instr[1].data.field('FLUX_BKG')[:]
        flux_bkg_err = instr[1].data.field('FLUX_BKG_ERR')[:]
        cosmic_rays = instr[1].data.field('COSMIC_RAYS')[:]
        quality = instr[1].data.field('QUALITY')[:]
        pos_corr1 = instr[1].data.field('POS_CORR1')[:]
        pos_corr2 = instr[1].data.field('POS_CORR2')[:]

# resize time series

    if status == 0:
        raw_cnts = raw_cnts[:, y1:y2, x1:x2]
        flux = flux[:, y1:y2, x1:x2]
        flux_err = flux_err[:, y1:y2, x1:x2]
        flux_bkg = flux_bkg[:, y1:y2, x1:x2]
        flux_bkg_err = flux_bkg_err[:, y1:y2, x1:x2]
        cosmic_rays = cosmic_rays[:, y1:y2, x1:x2]

# reshape time series images

    if status == 0:
        isize = numpy.shape(flux)[0]
        jsize = numpy.shape(flux)[1]
        ksize = numpy.shape(flux)[2]
        raw_cnts = numpy.reshape(raw_cnts, (isize, jsize * ksize))
        flux = numpy.reshape(flux, (isize, jsize * ksize))
        flux_err = numpy.reshape(flux_err, (isize, jsize * ksize))
        flux_bkg = numpy.reshape(flux_bkg, (isize, jsize * ksize))
        flux_bkg_err = numpy.reshape(flux_bkg_err, (isize, jsize * ksize))
        cosmic_rays = numpy.reshape(cosmic_rays, (isize, jsize * ksize))

# pixel map data

    if status == 0:
        maskmap = array(instr[2].data[y1:y2, x1:x2])

# construct output primary extension

    if status == 0:
        hdu0 = pyfits.PrimaryHDU()
        for i in range(len(cards0)):
            try:
                if cards0[i].key not in list(hdu0.header.keys()):
                    hdu0.header.update(cards0[i].key, cards0[i].value,
                                       cards0[i].comment)
                else:
                    hdu0.header.cards[
                        cards0[i].key].comment = cards0[i].comment
            except:
                pass
        status = kepkey.history(call, hdu0, outfile, logfile, verbose)
        outstr = HDUList(hdu0)

# construct output light curve extension

    if status == 0:
        coldim = '(' + str(imsize) + ',' + str(imsize) + ')'
        eformat = str(imsize * imsize) + 'E'
        jformat = str(imsize * imsize) + 'J'
        kformat = str(imsize * imsize) + 'K'
        col1 = Column(name='TIME',
                      format='D',
                      unit='BJD - 2454833',
                      array=time)
        col2 = Column(name='TIMECORR', format='E', unit='d', array=timecorr)
        col3 = Column(name='CADENCENO', format='J', array=cadenceno)
        col4 = Column(name='RAW_CNTS',
                      format=jformat,
                      unit='count',
                      dim=coldim,
                      array=raw_cnts)
        col5 = Column(name='FLUX',
                      format=eformat,
                      unit='e-/s',
                      dim=coldim,
                      array=flux)
        col6 = Column(name='FLUX_ERR',
                      format=eformat,
                      unit='e-/s',
                      dim=coldim,
                      array=flux_err)
        col7 = Column(name='FLUX_BKG',
                      format=eformat,
                      unit='e-/s',
                      dim=coldim,
                      array=flux_bkg)
        col8 = Column(name='FLUX_BKG_ERR',
                      format=eformat,
                      unit='e-/s',
                      dim=coldim,
                      array=flux_bkg_err)
        col9 = Column(name='COSMIC_RAYS',
                      format=eformat,
                      unit='e-/s',
                      dim=coldim,
                      array=cosmic_rays)
        col10 = Column(name='QUALITY', format='J', array=quality)
        col11 = Column(name='POS_CORR1',
                       format='E',
                       unit='pixel',
                       array=pos_corr1)
        col12 = Column(name='POS_CORR2',
                       format='E',
                       unit='pixel',
                       array=pos_corr2)
        cols = ColDefs([
            col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11,
            col12
        ])
        hdu1 = new_table(cols)
        for i in range(len(cards1)):
            try:
                if cards1[i].key not in list(hdu1.header.keys()):
                    hdu1.header.update(cards1[i].key, cards1[i].value,
                                       cards1[i].comment)
                else:
                    hdu1.header.cards[
                        cards1[i].key].comment = cards1[i].comment
            except:
                pass
        # the six image columns (RAW_CNTS through COSMIC_RAYS, columns 4-9)
        # share the same WCS reference values, so write their keywords in a loop
        for n in range(4, 10):
            hdu1.header.update('1CRV%dP' % n, crval1p,
                               '[pixel] detector coordinate at reference pixel')
            hdu1.header.update('2CRV%dP' % n, crval2p,
                               '[pixel] detector coordinate at reference pixel')
            hdu1.header.update('1CRPX%d' % n, (imsize + 1) / 2,
                               '[pixel] reference pixel along image axis 1')
            hdu1.header.update('2CRPX%d' % n, (imsize + 1) / 2,
                               '[pixel] reference pixel along image axis 2')
        outstr.append(hdu1)

# construct output mask bitmap extension

    if status == 0:
        hdu2 = ImageHDU(maskmap)
        for i in range(len(cards2)):
            try:
                if cards2[i].key not in list(hdu2.header.keys()):
                    hdu2.header.update(cards2[i].key, cards2[i].value,
                                       cards2[i].comment)
                else:
                    hdu2.header.cards[
                        cards2[i].key].comment = cards2[i].comment
            except:
                pass
        hdu2.header.update('NAXIS1', imsize, '')
        hdu2.header.update('NAXIS2', imsize, '')
        hdu2.header.update('CRVAL1P', crval1p,
                           '[pixel] detector coordinate at reference pixel')
        hdu2.header.update('CRVAL2P', crval2p,
                           '[pixel] detector coordinate at reference pixel')
        hdu2.header.update('CRPIX1', (imsize + 1) / 2,
                           '[pixel] reference pixel along image axis 1')
        hdu2.header.update('CRPIX2', (imsize + 1) / 2,
                           '[pixel] reference pixel along image axis 2')
        outstr.append(hdu2)

# write output file

    if status == 0:
        outstr.writeto(outfile, checksum=True)

# close input structure

    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

# end time

    kepmsg.clock('KEPTRIM finished at', logfile, verbose)
Code example #48
0
import pyfits
import pylab
import ephem
import time
from PIL import Image
from numpy import *

hdulist = pyfits.open(
    '/home/scratch/kbandura/GBT09C_075/00_zCOSMOS_drift_9-17.raw.acs.fits')

gbt_data = hdulist[1].data

#for drift scan crval2 is az, crval3 is el
#for ra-long maps, crval2 is ra, crval3 is dec
az_gbt = gbt_data.field('crval2')
el_gbt = gbt_data.field('crval3')
times = gbt_data.field('DATE-OBS')
dur = gbt_data.field('DURATION')

GBT = ephem.Observer()
GBT.long = '-79:50:23.4'
GBT.lat = '38:25:59.23'
GBT.pressure = 0
GBT.temp = 0
az_r = az_gbt * pi / 180.0
el_r = el_gbt * pi / 180.0

max_times = times.shape[0]
rag = zeros(max_times)
decg = zeros(max_times)
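# The listing stops after allocating the output arrays.  A sketch of the
# likely continuation, converting each az/el sample to ra/dec with the
# ephem observer (DATE-OBS strings may need reformatting for ephem):
for i in range(max_times):
    GBT.date = times[i].replace('T', ' ').replace('-', '/')
    rag[i], decg[i] = GBT.radec_of(az_r[i], el_r[i])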
Code example #49
0
input_fits_image_band1 = './input/ngp7_ks_2h30min_20160228_astro_2MASS_0p231rms.fits'  #=====Ks
input_fits_image_band2 = './input/ngp7_j_1h_20160228_astro_2MASS_0p231rms.fits'  #=====J
input_fits_image_band3 = './input/ngp7_h_54min_20160228_astro_2MASS_0p232rms.fits'  #=====H
#input_fits_image_band4='./input/gama12_i_band_ACAM_53min_20160226_astro_0p241rms.fits'  #=====I
#input_fits_image_band5='./input/G12v230_IRAC_Mosaic_36.fits'                            #=====3.6 micron
#input_fits_image_band6='./input/G12v230_IRAC_Mosaic_45.fits'                            #=====4.5 micron

band4_present = 0  #=================================================================1:yes 0:no (I band)
band5_present = 0  #=================================================================1:yes 0:no (3.6 band)
band6_present = 0  #=================================================================1:yes 0:no (4.5 band)

#================================Load in fits table===============================================

names_cutoff = pyfits.open(
    './aplpy_subset_combined.fits'
)  #========================================================HERE
names_cutoff_data = names_cutoff[1].data
X_WORLD_K = names_cutoff_data.field('X_WORLD_K')
Y_WORLD_K = names_cutoff_data.field('Y_WORLD_K')
K_mag = names_cutoff_data.field(
    'M_calibrated_K=MAG_APER_K+26.2715655err0.0033601851'
)  #==================================================HERE note to correct column name
J_mag = names_cutoff_data.field(
    'M_calibrated_J=MAG_APER_J+-27.7601671913err0.0105872686'
)  #==================================================HERE note to correct column name
H_mag = names_cutoff_data.field(
    'M_calibrated_H=MAG_APER_H+-27.3164248253err0.0079622063'
)  #==================================================HERE note to correct column name
#I_mag = names_cutoff_data.field('M_calibrated_I=MAG_APER_I+32.5366530465err0.0029509302') #==================================================HERE note to correct column name
NUMBER_K = names_cutoff_data.field('NUMBER_K')
Code example #50
0
import math

import pyfits

kb = 1.38E-16  # Boltzmann constant in cgs (erg/K)
c = 3.0E10  # speed of light in cm/s (assumed; the original definition is not shown in this listing)

# solid angle of one pixel: a 10-degree field sampled by 1024 pixels
pixel_area = (10 / 180. * math.pi / 1024)**2


def Tb2flux(Tb, nu_MHz):
    nu = nu_MHz * 1E6
    return kb * Tb * 2 * nu**2 / c**2 * pixel_area
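# For scale: Tb2flux applies the Rayleigh-Jeans relation
# S = 2 * kb * Tb * nu**2 / c**2 per unit solid angle, times the pixel
# solid angle above.  Example (commented out so the script flow is unchanged):
#     print(Tb2flux(1.0, 150.0))  # flux of a 1 K pixel at 150 MHz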


fout = open('spec.qdp', 'w')

for k in range(0, len(freq_list)):
    fname = fprefix + str(freq_list[k]) + '.fits'
    img = pyfits.open(fname)[0].data
    center_ij = img_utils.max_element(img)
    print center_ij

    for i in range(-rmax, rmax + 1):
        for j in range(-rmax, rmax + 1):
            r = int(math.sqrt(i**2 + j**2))
            if img[center_ij[0] + i, center_ij[1] + j] > 0 and r < rmax:
                profile_sum[k][int(r)] += img[center_ij[0] + i,
                                              center_ij[1] + j]
                profile_cnt[k][int(r)] += 1

    for i in range(0, len(profile_sum[k])):
        if profile_cnt[k][i] > 0:
            profile_sum[k][i] /= profile_cnt[k][i]
            if profile_sum[k][i] < profile_sum[k][0] / 10.:
Code example #51
0
def get_pc_setpts(log_name="setpt_log.csv", mode="science"):
    ''' 
    Take FFT of PSF, and calculate new Phasecam PL and TT setpoints
    
    INPUTS:
    log_name: name of the csv file to which FFT information will be printed
    mode: "fake_fits": read in fake FITS files (but continue sending LMIR and mirror commands)
          "artif_source": use an artificial source (either laser or pinhole)
          "science": on-sky
    '''

    ampArray = []

    counter_num = 0

    take_roi_background()

    if (mode == "fake_fits"):
        #f = pyfits.open("test_fits_files/test_frame_fiz_large.fits")
        f = pyfits.open("test_fits_files/test_frame_fiz_small.fits")
    elif (mode == "science"):
        # take a frame with background subtracting
        print("Taking a background-subtracted frame")
        f = pi.getFITS("LMIRCAM.fizPSFImage.File", "LMIRCAM.acquire.enable_bg=1;int_time=%i;is_bg=0;is_cont=0;num_coadds=1;num_seqs=1" % 100, timeout=60)

    image = f[0].data

    for f in range(0,1): # just 1 sample for now

        start = time.time() # start timer


        ## ##image, header = fits.getdata(filename_str,0,header=True)

        # locate PSF
        psf_loc = overlap_psfs.find_airy_psf(image)

        # size of cookie cut-out (measured center-to-edge)
        cookie_size = 100  # maximum control radius as of 2018 July corresponds to 130.0 pixels

        # take FFT
        # cookie_cut = image[psf_loc[0]-cookie_size:psf_loc[0]+cookie_size,psf_loc[1]-cookie_size:psf_loc[1]+cookie_size]
        cookie_cut = np.copy(image)
        amp, arg = fft_img(cookie_cut).fft(padding=int(5*cookie_size), mask_thresh=1e5)

        # test: image with a perfect slope
        ''' 
        testing, header = fits.getdata('slope_test_psf.fits',0,header=True)
        cookie_cut_testing = testing[psf_loc[0]-cookie_size:psf_loc[0]+
                                     cookie_size,psf_loc[1]-cookie_size:psf_loc[1]+cookie_size]
        #sciImg = ma.asarray(sciImg)
        amp[np.isfinite(amp)] = -1 #cookie_cut_testing[np.isfinite(amp)]
        '''

        # sanity check (and to avoid getting for loop stuck)
        if (np.shape(amp)[0]!=np.shape(amp)[1]): # if the FFT doesn't make sense (i.e., if PSF was not found)
            print('PSF does not make sense ... aborting this one ...')
            continue

        print(amp.data)
        print(arg.data)

        # analyze FFTs
        fftInfo_amp = fftMask(amp, wavel_lambda, plateScale,
                              fyi_string=str("{:0>6d}".format(frame_num)) + ' FFT amp')
        fftInfo_arg = fftMask(arg, wavel_lambda, plateScale,
                              fyi_string=str("{:0>6d}".format(frame_num)) + ' FFT phase')

        print(fftInfo_amp)
        print(fftInfo_arg)

        # save fyi FITS files
        pyfits.PrimaryHDU(amp.data).writeto('junk_test_amp.fits', clobber=True)
        pyfits.PrimaryHDU(arg.data).writeto('junk_test_arg.fits', clobber=True)

        ## take info from the FFTs to send corrective movements

        # thresholds
        fft_ampl_high_freq_lowlimit = 2.4e5 # for good fringe visibility
        fft_ampl_low_freq_lowlimit = 1.4e6 # for acceptable AO correction
        fft_phase_vec_high_freq_highlimit = 5 # for Airy overlap
        std_lowFreqPerfect_lowlimit = 10 # for Airy overlap
        # phase_normVec_highFreqPerfect_R_x  # for phase of high-freq fringes (no threshold value set yet)

        # poor overlap of the Airy PSFs?
        print("--------------------------")
        print("Std of phase of low freq lobe:")
        print(fftInfo_arg["std_lowFreqPerfect"])
        ## HOW DOES IT COMPARE WITH std_lowFreqPerfect_lowlimit ?

        # poor fringe visibility
        print("Median of ampl of high freq lobe:")
        print(fftInfo_amp["med_highFreqPerfect_R"])
        ## HOW DOES IT COMPARE W fft_ampl_high_freq_lowlimit

        # high-freq fringes have strange phase
        ## ## change FPC TT until phase gradients in PTF are removed
        ## ## 1. Differential tip: phase gradient is all up-down, and the low-freq node in FT amplitude takes on a crushed ellipticity.
        ## ## 2. Differential tilt: phase gradient is left-right, but it is not continuous; it is divided among the three nodes.
        print("Phase gradient in x of high freq in PTF:")
        print(fftInfo_arg["phase_normVec_highFreqPerfect_R"][0])
        print("Phase gradient in y of high freq in PTF:")
        print(fftInfo_arg["phase_normVec_highFreqPerfect_R"][1])

        # other quality control metrics from Phasecam (email from D. Defrere, 2018 Dec 17)
        # PCMSNR: S/N of K-band fringes
        # PCPHSTD: noise of phase in the integration time of NOMIC

        # all together now, lets make corrective movements
        # for better Airy overlap: tip-tilt the FPC
        pi.setINDI("Acromag.FPC.Tip="+'{0:.1f}'.format(vector_move_asec[0])+";Tilt="+'{0:.1f}'.format(vector_move_asec[1])+";Piston=0;Mode=1")

        # for better fringe visibility: move the FPC or HPC in piston
        stepSize = 5. # (um, total OPD)
        # big steps, translation stage: Ubcs.SPC_Trans.command=>5
        pi.setINDI("Ubcs.SPC_Trans.command=>"+'{0:.1f}'.format(10*0.5*stepSize)) # factor of 10 bcz command is in 0.1 um
        # small steps, piezos: Acromag.HPC.Tip=0;Tilt=0;Piston=[stepSize];Mode=1
        ## ## pi.setINDI("Acromag.HPC.Tip=0;Tilt=0;Piston="+'{0:.1f}'.format(stepSize)+";Mode=1")
        ## ## pi.setINDI("Acromag.FPC.Tip=0;Tilt=0;Piston="+'{0:.1f}'.format(stepSize)+";Mode=1")

        end = time.time()
        print(end - start)
        print('-----')

        # turn off fizeau flag to avoid problems with other observations
        print("De-activating ROI aquisition flag")
        pi.setINDI("LMIRCAM.fizRun.value=Off")

        return
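
A minimal invocation sketch (assuming the LBTI/INDI services and the helper modules above are available; both calls are illustrative only):

if __name__ == "__main__":
    # dry run on canned frames first, then the on-sky mode
    get_pc_setpts(log_name="setpt_log.csv", mode="fake_fits")
    # get_pc_setpts(log_name="setpt_log.csv", mode="science")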
Code example #52
0
    if "F1065C" in indir: lambda0 = 10.65
    if "F1140C" in indir: lambda0 = 11.40
    if "F1550C" in indir: lambda0 = 15.50

    # size of input images (7.04 arcseconds)
    if "MIRI" in indir: instr = 'MIRI'
    elif "NIRCam" in indir: instr = 'NIRCam'

    os.chdir(
        '/Users/lajoie/Documents/Work/Projects/JWST/Simulations/Coronagraphs/Dither-LOCI/Results/'
        + indir)
    #os.chdir('/Users/lajoie/Documents/Work/Projects/JWST/CWG/SGD/'+indir)
    directory = os.getcwd()

    input_Unocculted = glob.glob('*run1_Unocculted*.fits')
    hdu = pyfits.open(input_Unocculted[0])
    unocculted = hdu[0].data
    hdu.close()
    dim = unocculted.shape[0]
    npix = dim**2

    # Binary aperture centered on (xmid, ymid): 1 within "radius", 0 outside
    xmid, ymid = dim / 2 - 1, dim / 2 - 1
    radius = 4
    aperture = np.array([[
        1. if (np.sqrt((i - xmid)**2 + (j - ymid)**2) < radius) else 0.
        for i in xrange(dim)
    ] for j in xrange(dim)])
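    # A vectorized sketch of the same binary aperture (equivalent result, using
    # numpy's ogrid; shown for reference only):
    #   jj, ii = np.ogrid[:dim, :dim]
    #   aperture = (np.sqrt((ii - xmid)**2 + (jj - ymid)**2) < radius).astype(float)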
    # aperture-summed PSF: a 2-D convolution of the unocculted image with the
    # aperture (convolving the flattened 1-D arrays would mix pixels across rows)
    from scipy.signal import fftconvolve
    psf_aper = fftconvolve(unocculted, aperture, mode='same')

    binsize = 1.0
Code example #53
0
#   script to produce moon and sun az and el synchronized to 10 and 15 GHz pointing files

import cofe_util as util
import pyfits
import numpy as np
import cPickle

#start with 10 GHz
# open up the data and pointing files

s10 = pyfits.open('c:/cofe/flight_data/Level1/1.1/all_10GHz_servo.fits')
ut10 = s10['TIME'].data['UT']
lat = s10['gyro_hid'].data['hybridlatitude']
lon = s10['gyro_hid'].data['hybridlongitude']
s10.close()
ndata = len(ut10)
# override the gyro hybrid solution with fixed geographic coordinates (radians)
lat = np.zeros(ndata, dtype=np.float32) + 0.596
lon = np.zeros(ndata, dtype=np.float32) - 1.8179
azsun, elsun = util.get_cofe_target(ut10, lat, lon, 'Sun')
azmoon, elmoon = util.get_cofe_target(ut10, lat, lon, 'Moon')
azsun = azsun * 180. / np.pi
elsun = elsun * 180. / np.pi
azmoon = azmoon * 180. / np.pi
elmoon = elmoon * 180. / np.pi
sun = [azsun, elsun]
moon = [azmoon, elmoon]
sunfile10 = open('c:/cofe/flight_data/Level1/1.1/sunazel10ghz.pkl', 'wb')
cPickle.dump(sun, sunfile10)
sunfile10.close()
moonfile10 = open('c:/cofe/flight_data/Level1/1.1/moonazel10ghz.pkl', 'wb')
cPickle.dump(moon, moonfile10)
moonfile10.close()
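
A quick read-back check of one of the pickled products (same path as above):

sunfile10 = open('c:/cofe/flight_data/Level1/1.1/sunazel10ghz.pkl', 'rb')
azsun_chk, elsun_chk = cPickle.load(sunfile10)
sunfile10.close()
print 'sun az range (deg):', azsun_chk.min(), azsun_chk.max()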
Code example #54
0
import os
import shutil

import numpy as np
import pyfits

# `tb` is the CASA table tool, available inside a casapy/CASA session.


def calfits_to_Bcal(cfits, inp_cfile, out_cfile=None, overwrite=False):
    """
    Take a calfits antenna gain file and insert data into an existing
    CASA bandpass calibration table. Note that due to the obtuseness of CASA,
    this function is VERY rigid: the calfits file must be very similar in shape
    and order to the CASA Bcal table.

    It is only recommended to use this script on calfits files that were originally
    *B.cal tables, exported to B.cal.npz files by sky_cal.py and then converted to
    calfits via skynpz2calfits.py.

    Args:
        cfits : str, filepath to pyuvdata calfits file
        inp_cfile : str, filepath to CASA Bandpass calibration table to use as a template
        out_cfile : str, filepath for output CASA Bcal table with cfits data
        overwrite : bool, if True, overwrite output Bcal table
    """
    # assert IO
    if out_cfile is None:
        out_cfile = inp_cfile        
    if os.path.exists(out_cfile) and not overwrite:
        raise IOError("Output cal table {} exists and overwrite is False...".format(out_cfile))

    # move inp_cfile to out_cfile
    if os.path.exists(out_cfile):
        shutil.rmtree(out_cfile)
    shutil.copytree(inp_cfile, out_cfile)

    # load cfits data and get metadata
    hdu = pyfits.open(cfits)
    head = hdu[0].header
    data = hdu[0].data

    # open out_cfile descriptor
    tb.open(out_cfile)
    assert "CPARAM" in tb.getdminfo()['*1']['COLUMNS'], "{} is not a CASA bandpass table...".format(inp_cfile)
    d = tb.getcol("CPARAM")
    f = tb.getcol("FLAG")
    a = tb.getcol("ANTENNA1")

    # The pol axes must match in size
    assert head['NAXIS2'] == d.shape[0], "Npols doesn't match between {} and {}".format(inp_cfile, cfits)

    # real and imag are 0, 1 Image Array axes of fits file
    flags = data[:, 0, :, :, :, 2]
    data = data[:, 0, :, :, :, 0].astype(np.complex) - 1j * data[:, 0, :, :, :, 1]  # CASA conjugates cal solutions...

    # extend to matching antennas
    Nants, Nfreqs, Ntimes, Npols = data.shape
    ants = hdu[1].data['ANTARR'].astype(np.int).tolist()
    _data, _flags = [], []
    for i, ant in enumerate(a):
        if ant in ants:
            aind = ants.index(ant)
            _data.append(data[aind])
            _flags.append(flags[aind])
        else:
            _data.append(np.ones((Nfreqs, Ntimes, Npols), dtype=np.complex))
            _flags.append(np.ones((Nfreqs, Ntimes, Npols), dtype=np.float))
    data = np.asarray(_data, dtype=np.complex)
    flags = np.asarray(_flags, dtype=np.float)    

    # cal table is ordered as ant1_time1, ant2_time1, ... ant1_time2, ant2_time2
    Nants, Nfreqs, Ntimes, Npols = data.shape
    data = np.moveaxis(data, 2, 0).reshape(Nants * Ntimes, Nfreqs, Npols).T
    flags = np.moveaxis(flags, 2, 0).reshape(Nants * Ntimes, Nfreqs, Npols).T

    # now select frequencies that match cal table
    tb.close()
    tb.open("{}/SPECTRAL_WINDOW".format(out_cfile))
    fr = tb.getcol("CHAN_FREQ")[:, 0]
    tb.close()
    freqs = np.arange(head["NAXIS4"]) * head["CDELT4"] + head["CRVAL4"]
    fselect = np.array([np.isclose(_f, fr).any() for _f in freqs])
    data = data[:, fselect, :]
    flags = flags[:, fselect, :]

    # the two arrays must match in shape now
    assert data.shape == d.shape, "fits_data.shape != cal_data.shape..."
    assert flags.shape == f.shape, "fits_flags.shape != cal_flags.shape..."

    # putcol
    print("...inserting {} data and flags into {}".format(cfits, out_cfile))
    tb.open(out_cfile, nomodify=False)
    tb.putcol("CPARAM", data)
    tb.putcol("FLAG", flags)
    tb.close()

    return out_cfile
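
A minimal usage sketch inside a CASA session (file names hypothetical):

# inside casapy, where the `tb` table tool is defined:
out = calfits_to_Bcal("zen.2458116.xx.HH.uvc.calfits", "sky_cal.B.cal",
                      out_cfile="inserted.B.cal", overwrite=True)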
Code example #55
0
File: fitsimage.py Project: dhorkin/lemon
    def update_keyword(self, keyword, value, comment=None):
        """ Updates the value of a FITS keyword, adding it if it does not exist.

        The method updates the value of a keyword in the FITS header, replacing
        it with the specified value or simply adding it in case it does not yet
        exist. Note that, although always upper-case inside the FITS file,
        keywords are here case-insensitive, for the user's convenience.

        Raises ValueError if a HIERARCH keyword (that is, a keyword longer than
        eight characters or that contains spaces) and its value exceed eighty
        characters. The reason for this limitation is that PyFITS does not
        support CONTINUE for HIERARCH. If the value is too long, therefore,
        make sure that the keyword does not need to be HIERARCH-ed.

        Keyword arguments:
        comment - the comment to be added to the keyword.

        """

        if len(keyword) > 8:
            msg = "%s: keyword '%s' is longer than eight characters or " \
                  "contains spaces; a HIERARCH card will be created"
            logging.debug(msg % (self.path, keyword))

        handler = pyfits.open(self.path, mode='update')
        msg = "%s: file opened to update '%s' keyword" % (self.path, keyword)
        logging.debug(msg)

        try:
            header = handler[0].header

            # Ignore the 'card is too long, comment is truncated' warning
            # printed by PyFITS in case, well, the comment is too long.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                header[keyword] = (value, comment)
                args = self.path, keyword, value
                msg = "%s: keyword '%s' updated to '%s'" % args
                if comment:
                    msg += " with comment '%s'" % comment
                logging.debug(msg)

            # Update in-memory copy of the FITS header
            self._header = header

        except ValueError, e:

            # ValueError is raised if a HIERARCH keyword is used and the total
            # length (keyword, equal sign string and value) is greater than 80
            # characters. The default exception message is a bit cryptic ("The
            # keyword {...} with its value is too long"), so add some more
            # information to help the user understand what went wrong.

            pattern = "The keyword .*? with its value is too long"
            if re.match(pattern, str(e)):
                assert len(keyword) > 8
                msg = ("%s: keyword '%s' could not be updated (\"%s\"). Note "
                       "that PyFITS does not support CONTINUE for HIERARCH. "
                       "In other words: if your keyword has more than eight "
                       "characters or contains spaces, the total length of "
                       "the keyword with its value cannot be longer than %d "
                       "characters.")
                args = self.path, keyword, str(e), pyfits.Card.length
                logging.warning(msg % args)
                raise ValueError(msg % args)
            else:
                # Different ValueError, re-raise it
                msg = "%s: keyword '%s' could not be updated (%s)"
                args = self.path, keyword, e
                logging.warning(msg % args)
                raise
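
A hypothetical call, assuming an instance of the surrounding FITSImage class:

# img = FITSImage('frame001.fits')   # hypothetical path
# img.update_keyword('AIRMASS', 1.27, comment='at mid-exposure')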
Code example #56
0
    old_sh = 0.
    if 'VELSHIFT' in imfits[0].header:
        old_sh = imfits[0].header['VELSHIFT']
    imfits[0].header['VELSHIFT'] = (vshift + old_sh, 'Lambda shift in km/s')

    print('# {0} updated with {1:.1f} km/s shift (accum. {2:.0f} km/s)'.format(
        fitsfile, vshift, vshift + old_sh))
    return


if __name__ == '__main__':

    lbc = args.line
    linput = args.INPUT

    for fitsfile in glob(linput):
        imfits = pf.open(fitsfile, mode='update')

        if args.rm is True:
            if 'VELSHIFT' in imfits[0].header:
                vshift = -imfits[0].header['VELSHIFT']
                apply_shift(imfits, vshift)
            else:
                warn('No VELSHIFT for {0}'.format(fitsfile))

        elif args.vs != 0:
            v0 = 0
            if 'VELSHIFT' in imfits[0].header:
                v0 = imfits[0].header['VELSHIFT']
            vshift = args.vs - v0
            apply_shift(imfits, vshift)
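
Given the argparse options implied above (--rm, --vs, and an INPUT glob), invocations would look like the following (script name hypothetical):

# python velshift.py --vs -35.0 'spectra/*.fits'   # apply a -35 km/s shift
# python velshift.py --rm 'spectra/*.fits'         # undo any recorded VELSHIFT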
Code example #57
0
        #sources=self.source_list
        self.source_list = []
        return result


if __name__ == "__main__":

    nsources = len(sys.argv) - 4
    if nsources < 2:
        print "Usage:", sys.argv[
            0], " <mask> <nout> <signal 1> <signal 2> [signal 3,4...] <out prefix>"

        sys.exit(-1)

    mask = pyfits.open(sys.argv[1])[0].data

    nout = int(sys.argv[2])
    out_prefix = sys.argv[3 + nsources]
    print "out prefix:", out_prefix
    mi = mask_icaer(nout)
    mi.mask = (mask.astype('int'))
    for i in range(3, 3 + nsources):
        print "loading:", sys.argv[i]
        mi.append_source(pyfits.open(sys.argv[i])[0].data)

    print "Performing ICA"

    sources = mi.perform()

    for i in range(0, nout):
Code example #58
0
File: fitsimage.py Project: dhorkin/lemon
    def __init__(self, path):
        """ Instantiation method for the FITSImage class.

        A copy of the header of the FITS file is kept in memory for fast access
        to its keywords. IOError is raised if 'path' does not exist or is not
        readable, and NonStandardFITS if it does not conform to the FITS
        standard or simply is not a FITS file at all.

        FITS Standard Document:
        http://fits.gsfc.nasa.gov/fits_standard.html

        An image is considered to follow the FITS standard if it has 'SIMPLE'
        as its first keyword of the primary header. According to the standard,
        it contains a logical constant with the value 'T' if the file conforms
        to it. This keyword is mandatory for the primary header and is not
        permitted in extension headers. A value of 'F', on the other hand,
        means that the file does not conform to the standard.

        We trust the 'SIMPLE' keyword blindly: if it says that the FITS image
        follows the standard, we believe it. Period. We do not consider the
        possibility (although this may change in the future, if we begin to
        work with much less reliable data) that the keyword has a value of 'T'
        while at the same time there are violations of the standard. That is
        why we instruct PyFITS to ignore any FITS standard violations we come
        across (output_verify = 'ignore').

        """

        if not os.path.exists(path):
            raise IOError("file '%s' does not exist" % path)

        self.path = path

        try:
            # The file must be opened to make sure it is a standard FITS.
            # We would rather use the with statement, but in that case we
            # would not be able to set the output verification of close() to
            # 'ignore'. Thus, the default option, 'exception', which raises
            # an exception if any FITS standard is violated, would be used.
            handler = pyfits.open(self.path, mode='readonly')
            try:

                # This is kind of an ugly hack to make sure that all PyFITS
                # versions are supported. Up to version 3.2, PyFITS returned
                # the HDUs exactly as it saw them. This allowed us to check the
                # existence and value of the 'SIMPLE' keyword in order to make
                # sure the file conforms to the FITS standard. However, PyFITS
                # 3.3 changed this, and now adds the keywords required for a
                # minimal viable primary HDU: this means that the header will
                # always contain the 'SIMPLE' keyword. Refer to this link for
                # more info: https://github.com/spacetelescope/PyFITS/issues/94

                try:
                    type_ = pyfits.info(self.path, output=False)[0][2]
                    if type_ == 'NonstandardHDU':
                        # 'SIMPLE' exists but does not equal 'T'
                        msg = "%s: value of 'SIMPLE' keyword is not 'T'"
                        raise NonStandardFITS(msg % self.path)

                except AttributeError as e:
                    # 'SIMPLE' keyword does not exist
                    error_msg = "'_ValidHDU' object has no attribute '_summary'"
                    assert error_msg in str(e)
                    msg = "%s: 'SIMPLE' keyword missing from header"
                    raise NonStandardFITS(msg % self.path)

                # A copy of the FITS header is kept in memory and the file is
                # closed; otherwise we may run into trouble when working with
                # thousands of images ("too many open files" and such). This
                # approach gives us fast read-only access to the image header;
                # if modified, we will have to take care of 'reloading' (call
                # it synchronize, if you wish) the header.

                self.size = handler[0].data.shape[::-1]
                self._header = handler[0].header
            finally:
                handler.close(output_verify='ignore')

        # PyFITS raises IOError if we do not have permission to open the file,
        # if we attempt to open a non-FITS file, and also if we open one whose
        # first keyword is not either SIMPLE or XTENSION. Nothing is raised if
        # the value of SIMPLE is 'F'; that is why we had to specifically make
        # sure it was 'T' a few lines above.
        except IOError, e:
            pyfits_msg = "Block does not begin with SIMPLE or XTENSION"
            if str(e) == pyfits_msg:
                msg = "%s: 'SIMPLE' keyword missing from primary header"
                raise NonStandardFITS(msg % self.path)
            elif "Permission denied" in str(e):
                raise
            else:
                msg = "%s (%s)" % (self.path, str(e))
                raise NonStandardFITS(msg)
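
A minimal usage sketch (path hypothetical):

try:
    img = FITSImage('frame001.fits')
    print img.size  # (x, y), taken from the primary HDU data shape
except NonStandardFITS, e:
    print 'not a standard FITS file:', e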
Code example #59
0
# Assumes a pylab-style environment (e.g. `from pylab import *`) plus pyfits,
# and the helper functions make_heatmap, make_cold_gas_heatmap and add_at.
def run_momentum_figure(gal, aname):
    print 'Making plot...'
    eps_min = -2
    eps_max = 2
    rr_min = 0
    rr_max = 30
    zz_min = -10
    zz_max = 10
    rad_min = 0
    rad_max = 100.

    print aname
    plt.ioff()
    plt.close('all')
    fig = figure(figsize=(9,12))

    a = pyfits.open('/nobackupp2/rcsimons/momentum_measurements/%s/%s_%s_momentum.fits'%(gal, gal, aname))

    epsilon_stars = a['STARS_EPSILON'].data
    rr_stars=a['STARS_CYLINDRICAL_POSITION'].data[0]
    zz_stars=a['STARS_CYLINDRICAL_POSITION'].data[1]
    rad_stars = sqrt(sum(a['STARS_XYZ_POSITION'].data**2., axis = 0))

    star_age=a['STAR_AGE'].data
    star_mass=a['STAR_MASS'].data

    ax = fig.add_subplot(431)
    cg_str = "Cold Gas\n"+r"T < 10$^4$ K"
    ax = make_cold_gas_heatmap(ax, a['GAS_ZZ_EPSILON'].data, a['GAS_ZZ_EPSILON_EDGES'].data[0], a['GAS_ZZ_EPSILON_EDGES'].data[1],
                                 xlabel = '', ylabel = r'$\frac{j_z}{j_{circ}}$')
    add_at(ax, cg_str, loc=4)


    ax = fig.add_subplot(432)
    ax = make_cold_gas_heatmap(ax, a['GAS_RR_EPSILON'].data, a['GAS_RR_EPSILON_EDGES'].data[0], a['GAS_RR_EPSILON_EDGES'].data[1],
                                 xlabel = '', ylabel = '')
    add_at(ax, cg_str, loc=4)

    ax.set_title(gal+"\n"+r"$z=%.2f$"%(1./float(aname.strip('a'))-1.), fontweight = 'bold')

    ax = fig.add_subplot(433)
    ax = make_cold_gas_heatmap(ax, a['GAS_RAD_EPSILON'].data, a['GAS_RAD_EPSILON_EDGES'].data[0], a['GAS_RAD_EPSILON_EDGES'].data[1],
                                 xlabel = '', ylabel = '')
    add_at(ax, cg_str, loc=4)





    ax = fig.add_subplot(434)
    good = where((abs(rr_stars) < rr_max) & (star_age < 20.e6))
    ys_str = "Young stars\nage < 20 Myr"
    ax = make_heatmap(ax, epsilon_stars, zz_stars, zz_min, zz_max, weights = star_mass, good = good, xlabel = '', ylabel = r'$\frac{j_z}{j_{circ}}$', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)
    add_at(ax, ys_str, loc=4)
    

    ax = fig.add_subplot(435)
    good = where((abs(zz_stars) < zz_max) & (star_age < 20.e6) & isfinite(epsilon_stars))
    ax = make_heatmap(ax, epsilon_stars, rr_stars, rr_min, rr_max, weights = star_mass, good = good, xlabel = '', ylabel = '', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)
    add_at(ax, ys_str, loc=4)


    ax = fig.add_subplot(436)
    good = where((rad_stars < rad_max) & (star_age < 20.e6) & isfinite(epsilon_stars))
    ax = make_heatmap(ax, epsilon_stars, rad_stars, rad_min, rad_max, weights = star_mass, good = good, xlabel = '', ylabel = '', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)
    add_at(ax, ys_str, loc=4)



    is_str = "Intermediate stars\n100 < age < 300 Myr"
    ax = fig.add_subplot(437)
    good = where((abs(rr_stars) < rr_max) & (star_age > 1.e8) & (star_age < 3.e8))
    ax = make_heatmap(ax, epsilon_stars, zz_stars, zz_min, zz_max, weights = star_mass, good = good, xlabel = '', ylabel = r'$\frac{j_z}{j_{circ}}$', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)
    add_at(ax, is_str, loc=4)
    

    ax = fig.add_subplot(438)
    good = where((abs(zz_stars) < zz_max) & (star_age > 1.e8) & (star_age < 3.e8) & isfinite(epsilon_stars))
    ax = make_heatmap(ax, epsilon_stars, rr_stars, rr_min, rr_max, weights = star_mass, good = good, xlabel = '', ylabel = '', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)
    add_at(ax, is_str, loc=4)

    ax = fig.add_subplot(439)
    good = where((rad_stars < rad_max) & (star_age > 1.e8) & (star_age < 3.e8) & isfinite(epsilon_stars))
    ax = make_heatmap(ax, epsilon_stars, rad_stars, rad_min, rad_max, weights = star_mass, good = good, xlabel = '', ylabel = '', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)
    add_at(ax, is_str, loc=4)




    ax = fig.add_subplot(4,3,10)
    os_str = "Old stars\nage > 1 Gyr"
    good = where((abs(rr_stars) < rr_max) & (star_age > 1.e9))
    ax = make_heatmap(ax, epsilon_stars, zz_stars, zz_min, zz_max, weights = star_mass, good = good, xlabel = 'distance above disk\n(kpc)', ylabel = r'$\frac{j_z}{j_{circ}}$', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)
    add_at(ax, os_str, loc=4)



    ax = fig.add_subplot(4,3,11)
    good = where((abs(zz_stars) < zz_max) & (star_age > 1.e9) & isfinite(epsilon_stars))
    ax = make_heatmap(ax, epsilon_stars, rr_stars, rr_min, rr_max, weights = star_mass, good = good, xlabel = 'distance along disk\n(kpc)', ylabel = '', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)

    add_at(ax, os_str, loc=4)

    ax = fig.add_subplot(4,3,12)
    good = where((rad_stars < rad_max) & (star_age > 1.e9) & isfinite(epsilon_stars))
    ax = make_heatmap(ax, epsilon_stars, rad_stars, rad_min, rad_max, weights = star_mass, good = good, xlabel = 'distance radial\n(kpc)', ylabel = '', 
                 bins_n = 50, eps_min = eps_min, eps_max = eps_max)
    add_at(ax, os_str, loc=4)



    fig.subplots_adjust(hspace = 0.0)

    print 'Saving plot...'

    savefig('/nobackupp2/rcsimons/figures/momentum_figures/%s_%s_momentum_heat.png'%(gal, aname), dpi = 300)
    plt.close('all')
    return
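
A hypothetical invocation, following the gal/aname pattern in the paths above (aname encodes the scale factor, so 'a0.500' corresponds to z = 1.0):

# run_momentum_figure('VELA27', 'a0.500')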
Code example #60
0
    def __init__(self, prefix):
        self.prefix = prefix
        self.fullname = fullname[self.prefix]
        self.cra = racenter[self.prefix]
        self.cdec = deccenter[self.prefix]
        self.cz = redshift[self.prefix]
        self.f80 = F80[self.prefix]
        self.errf80 = ErrorF80[self.prefix]
        self.csigma = sigma[self.prefix]
        self.csigmaerrplus = errsigmaplus[self.prefix]
        self.csigmaerrminus = errsigmaminus[self.prefix]
        # R200 in Mpc
        self.r200 = (2.02 * self.csigma / 1000. /
                     sqrt(OmegaL + OmegaM * (1. + self.cz)**3) * H0 / 70.)
        if self.r200 < .5:
            print 'WARNING:  R200 unrealistically small for ', self.prefix
            print 'resetting R200 to 0.5 Mpc'
            self.r200 = .5
        self.r200deg = self.r200 * 1000. / my.DA(self.cz, h) / 3600.
        self.mcl = my.clusterMass(self.csigma, self.cz, h)
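        # Worked example (assuming OmegaM = 0.3, OmegaL = 0.7, H0 = 70):
        # sigma = 700 km/s at z = 0.5 gives
        # r200 = 2.02 * 0.7 / sqrt(0.7 + 0.3 * 1.5**3) ~ 1.08 Mpc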

        mastertable = mastertablepath + self.fullname + 'mastertable.fits'
        tb = pyfits.open(mastertable)
        tbdata = tb[1].data
        tb.close()

        self.ediscsID = tbdata.field('EDISCS-ID')
        self.ediscsIDold = tbdata.field('EDISCS-ID-OLD')
        self.ra = tbdata.field('RA')
        self.dec = tbdata.field('DEC')
        self.xcorr = tbdata.field('xcorr')
        self.ycorr = tbdata.field('ycorr')
        self.starflag = tbdata.field('starflag')
        self.EW = tbdata.field('EW')
        self.EWerr = tbdata.field('EWerr')
        self.SFR = tbdata.field('SFR')
        self.SFRerr = tbdata.field('SFRerr')
        self.matchflaghalpha = tbdata.field('matchflaghalpha')
        self.onHaimageflag = tbdata.field('onHaimageflag')
        self.sfflag = tbdata.field('sfflag')
        self.matchflag24 = tbdata.field('matchflag24')
        self.on24imageflag = tbdata.field('on24imageflag')
        self.flux24 = tbdata.field('flux24')
        self.flux24err = tbdata.field('flux24err')
        self.nmatchediscs24 = tbdata.field('nmatchediscs24')
        self.snr24 = tbdata.field('snr24')
        self.imagex24 = tbdata.field('imagex24')
        self.imagey24 = tbdata.field('imagey24')
        self.ra24 = tbdata.field('ra24')
        self.dec24 = tbdata.field('dec24')
        self.flux80flag = tbdata.field('flux80flag')
        self.L24 = tbdata.field('L24')
        self.L24err = tbdata.field('L24err')
        self.Lir = tbdata.field('Lir')
        self.errLir = tbdata.field('errLir')
        self.SFRir = tbdata.field('SFRir')
        self.SFRirerr = tbdata.field('SFRirerr')
        self.matchflagediscsirac = tbdata.field('matchflagediscsirac')
        self.iracf1 = tbdata.field('iracf1')
        self.iracf2 = tbdata.field('iracf2')
        self.iracf3 = tbdata.field('iracf3')
        self.iracf4 = tbdata.field('iracf4')
        self.erriracf1 = tbdata.field('erriracf1')
        self.erriracf2 = tbdata.field('erriracf2')
        self.erriracf3 = tbdata.field('erriracf3')
        self.erriracf4 = tbdata.field('erriracf4')
        self.iracsexflag0 = tbdata.field('iracsexflag0')
        self.iracsexflag1 = tbdata.field('iracsexflag1')
        self.iracwch1 = tbdata.field('iracwch1')
        self.iracwch2 = tbdata.field('iracwch2')
        self.iracwch3 = tbdata.field('iracwch3')
        self.iracwch4 = tbdata.field('iracwch4')
        self.iracwmin = tbdata.field('iracwmin')
        self.nmatchediscsirac = tbdata.field('nmatchediscsirac')
        self.matchflagmorphgimtype = tbdata.field('matchflagmorphgimtype')
        self.gimtype = tbdata.field('gimtype')
        self.matchflagvistype = tbdata.field('matchflagvistype')
        self.vistype = tbdata.field('vistype')
        self.misoV = tbdata.field('misoV')
        self.misoeVapsim = tbdata.field('misoeVapsim')
        self.misoR = tbdata.field('misoR')
        self.misoeRapsim = tbdata.field('misoeRapsim')
        self.misoI = tbdata.field('misoI')
        self.misoeIapsim = tbdata.field('misoeIapsim')
        self.misoJ = tbdata.field('misoJ')
        self.misoeJapsim = tbdata.field('misoeJapsim')
        self.misoK = tbdata.field('misoK')
        self.misoeKapsim = tbdata.field('misoeKapsim')
        self.magV = tbdata.field('magV')
        self.mageVapsim = tbdata.field('mageVapsim')
        self.magR = tbdata.field('magR')
        self.mageRapsim = tbdata.field('mageRapsim')
        self.magI = tbdata.field('magI')
        self.mageIapsim = tbdata.field('mageIapsim')
        self.magJ = tbdata.field('magJ')
        self.mageJapsim = tbdata.field('mageJapsim')
        self.magK = tbdata.field('magK')
        self.mageKapsim = tbdata.field('mageKapsim')
        self.membflag = tbdata.field('membflag')
        self.newspecmatchflag = tbdata.field('newspecmatchflag')
        self.defmembflag = tbdata.field('defmembflag')
        self.photmembflag = tbdata.field('photmembflag')
        self.supermembflag = tbdata.field('supermembflag')
        self.specz = tbdata.field('specz')
        self.spectype = tbdata.field('spectype')
        self.specEWOII = tbdata.field('specEWOII')
        self.matchflagspecediscs = tbdata.field('matchflagspecediscs')
        self.specEWOIIflag = tbdata.field('specEWOIIflag')
        self.bestz = tbdata.field('bestz')
        self.lowz = tbdata.field('lowz')
        self.highz = tbdata.field('highz')
        self.wmin = tbdata.field('wmin')
        self.Pclust = tbdata.field('Pclust')
        self.MR = tbdata.field('MR')
        self.MU = tbdata.field('MU')
        self.MV = tbdata.field('MV')
        self.MB = tbdata.field('MB')
        self.stellarmass = tbdata.field('stellmass')
        self.redflag = tbdata.field('redflag')
        self.LUlowzclust = tbdata.field('LUlowzclust')
        self.LUbestzclust = tbdata.field('LUbestzclust')
        self.LUhighzclust = tbdata.field('LUhighzclust')
        self.LBlowzclust = tbdata.field('LBlowzclust')
        self.LBbestzclust = tbdata.field('LBbestzclust')
        self.LBhighzclust = tbdata.field('LBhighzclust')
        self.LVlowzclust = tbdata.field('LVlowzclust')
        self.LVbestzclust = tbdata.field('LVbestzclust')
        self.LVhighzclust = tbdata.field('LVhighzclust')
        self.LRlowzclust = tbdata.field('LRlowzclust')
        self.LRbestzclust = tbdata.field('LRbestzclust')
        self.LRhighzclust = tbdata.field('LRhighzclust')
        self.LIlowzclust = tbdata.field('LIlowzclust')
        self.LIbestzclust = tbdata.field('LIbestzclust')
        self.LIhighzclust = tbdata.field('LIhighzclust')
        self.LJlowzclust = tbdata.field('LJlowzclust')
        self.LJbestzclust = tbdata.field('LJbestzclust')
        self.LJhighzclust = tbdata.field('LJhighzclust')
        self.LKlowzclust = tbdata.field('LKlowzclust')
        self.LKbestzclust = tbdata.field('LKbestzclust')
        self.LKhighzclust = tbdata.field('LKhighzclust')

        # some extra quantities that are not included in the mastertable
        dr = sqrt((self.ra - self.cra)**2 + (self.dec - self.cdec)**2)
        self.drflag = (dr < self.r200deg)