def savefits(self,filename,bary=False):
        '''
        outputs a FITS file containing the histogram lightcurve
        v1.0 Kieran O'Brien - Dec 2011
        '''
        if bary:
            print 'saving barytime corrected lightcurve'
            col1=pyfits.Column(name='BARYTIME',format='E', array=self.barytimes)
            col2=pyfits.Column(name='COUNTS',format='E', array=self.obsheights)
            cols=pyfits.ColDefs([col1,col2])
            tbhdu=pyfits.new_table(cols)
            hdu=pyfits.PrimaryHDU(0)
            thdulist=pyfits.HDUList([hdu,tbhdu])
            thdulist.writeto(filename)
        else:
            print 'saving uncorrected lightcurve'
            col1=pyfits.Column(name='TIME',format='E', array=self.obstimes)
            col2=pyfits.Column(name='COUNTS',format='E', array=self.obsheights)
            cols=pyfits.ColDefs([col1,col2])
            tbhdu=pyfits.new_table(cols)
            hdu=pyfits.PrimaryHDU(0)
            thdulist=pyfits.HDUList([hdu,tbhdu])
            thdulist.writeto(filename)
        return
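
Note: pyfits.new_table belongs to the long-deprecated standalone pyfits package. A minimal modern sketch of the same table write, assuming astropy is available (the arrays are stand-ins for self.obstimes and self.obsheights):

import numpy as np
from astropy.io import fits

times = np.arange(10, dtype=np.float32)    # stand-in for self.obstimes
counts = np.ones(10, dtype=np.float32)     # stand-in for self.obsheights

col1 = fits.Column(name='TIME', format='E', array=times)
col2 = fits.Column(name='COUNTS', format='E', array=counts)
tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs([col1, col2]))
fits.HDUList([fits.PrimaryHDU(), tbhdu]).writeto('lightcurve.fits', overwrite=True)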
Example #2
File: file.py  Project: ajmendez/PySurvey
def write(hdu, filename, quiet=False, clobber=True, shortdir=None):
    '''Write a nice fits file.  Filenames ending in .gz will be compressed using the
    system gzip call.  hdu can be a list of pyfits.Column objects, a pyfits
    BinTableHDU, or a Cat object.  It notifies the user of what was written.

    clobber=[True] -- Overwrite files by default
    quiet=[False]  -- Do not announce that we are writing a file.  Why would you not?
    shortdir=['/path/','<Dir>'] -- Shorten the filename that is printed out to the screen
    '''
    
    # Handle column lists and hdus
    if isinstance(hdu, pyfits.hdu.table.BinTableHDU):
        hdus = hdu
    elif isinstance(hdu, (list, tuple)) and isinstance(hdu[0], pyfits.Column):
        hdus = pyfits.new_table(pyfits.ColDefs(hdu))
    elif isinstance(hdu, Cat):
        hdus = pyfits.new_table(pyfits.ColDefs(hdu.columns))
    else:
        hdus = pyfits.HDUList(hdu)
    
    # Save and compress if needed
    outname = filename.replace('.gz','')
    hdus.writeto(outname, clobber=clobber)
    if '.gz' in filename:
        subprocess.call(['gzip', '-f', outname])
    
    # You should generally tell the user what was saved, but hey you can
    # play with matches if you want.
    if not quiet:
        # print "Saved to: %s"%(filename)
        if shortdir is not None:
            splog('Saved to:', filename.replace(shortdir[0], shortdir[1]))
        else:
            splog('Saved to:', filename)
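
A hypothetical call to write() above, passing a plain list of columns (the path and column data are made up for illustration; the splog helper must be importable):

import numpy
import pyfits

cols = [pyfits.Column(name='RA', format='D', array=numpy.zeros(5)),
        pyfits.Column(name='DEC', format='D', array=numpy.zeros(5))]
write(cols, '/tmp/objects.fits.gz', shortdir=('/tmp/', '<Tmp>'))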
Example #3
def pixelizeCatalog(infiles, config, force=False):
    """
    Break catalog up into a set of healpix files.
    """
    nside_catalog = config['coords']['nside_catalog']
    nside_pixel = config['coords']['nside_pixel']
    outdir = mkdir(config['catalog']['dirname'])
    filenames = config.getFilenames()
    
    for ii,infile in enumerate(infiles):
        logger.info('(%i/%i) %s'%(ii+1, len(infiles), infile))
        f = pyfits.open(infile)
        data = f[1].data
        header = f[1].header
        logger.info("%i objects found"%len(data))
        if not len(data): continue
        glon,glat = cel2gal(data['RA'],data['DEC'])
        catalog_pix = ang2pix(nside_catalog,glon,glat,coord='GAL')
        pixel_pix = ang2pix(nside_pixel,glon,glat,coord='GAL')
        names = [n.upper() for n in data.columns.names]
        ra_idx = names.index('RA'); dec_idx = names.index('DEC')
        idx = max(ra_idx, dec_idx)  # insert the new columns after RA/DEC
        catalog_pix_name = 'PIX%i'%nside_catalog
        pixel_pix_name = 'PIX%i'%nside_pixel

        coldefs = pyfits.ColDefs(
            [pyfits.Column(name='GLON',format='1D',array=glon),
             pyfits.Column(name='GLAT',format='1D',array=glat),
             pyfits.Column(name=catalog_pix_name,format='1J',array=catalog_pix),
             pyfits.Column(name=pixel_pix_name  ,format='1J',array=pixel_pix)]
        )
        hdu = pyfits.new_table(data.columns[:idx+1]+coldefs+data.columns[idx+1:])
        table = hdu.data

        for pix in numpy.unique(catalog_pix):
            logger.debug("Processing pixel %s"%pix)
            outfile = filenames.data['catalog'][pix]
            if not os.path.exists(outfile):
                logger.debug("Creating %s"%outfile)
                names = [n.upper() for n in table.columns.names]
                formats = table.columns.formats
                columns = [pyfits.Column(n,f) for n,f in zip(names,formats)]
                out = pyfits.HDUList([pyfits.PrimaryHDU(),pyfits.new_table(columns)])
                out[1].header['NSIDE'] = nside_catalog
                out[1].header['PIX'] = pix
                out.writeto(outfile)
            hdulist = pyfits.open(outfile,mode='update')
            t1 = hdulist[1].data
            # Could we speed up with sorting and indexing?
            t2 = table[ table[catalog_pix_name] == pix ]
            nrows1 = t1.shape[0]
            nrows2 = t2.shape[0]
            nrows = nrows1 + nrows2
            out = pyfits.new_table(t1.columns, nrows=nrows)
            for name in t1.columns.names:
                out.data.field(name)[nrows1:]=t2.field(name)
            hdulist[1] = out
            logger.debug("Writing %s"%outfile)
            hdulist.flush()
            hdulist.close()
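
The row-append idiom in the loop above is worth isolating: old pyfits tables are fixed-size, so "appending" means allocating a larger table (which copies the first nrows1 rows from the column arrays) and filling in the tail by hand. A sketch, assuming t1 and t2 are BinTableHDU data with identical columns:

nrows1, nrows2 = t1.shape[0], t2.shape[0]
out = pyfits.new_table(t1.columns, nrows=nrows1 + nrows2)  # first nrows1 rows copied
for name in t1.columns.names:
    out.data.field(name)[nrows1:] = t2.field(name)         # fill the appended rows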
Example #4
def make_irf_index(index_mapping, outfile):
    if isinstance(index_mapping, dict):
        evclass, evtype = read_mappings_from_dict(index_mapping)
    else:
        evclass, evtype = read_mappings(index_mapping)

    output = pyfits.HDUList()
    output.append(pyfits.PrimaryHDU())

    evclass_cols = [
        pyfits.Column('EVENT_CLASS', format='60A', unit=' ', array=evclass[0]),
        pyfits.Column('BITPOSITION', format='1I', unit=' ', array=evclass[1]),
        pyfits.Column('EVENT_TYPES', format='1J', unit='  ', array=evclass[2])
    ]
    evclass_hdu = pyfits.new_table(evclass_cols)
    evclass_hdu.name = "BITMASK_MAPPING"
    output.append(evclass_hdu)

    evtype_cols = [
        pyfits.Column('EVENT_TYPE', format='60A', unit=' ', array=evtype[0]),
        pyfits.Column('BITPOSITION', format='1I', unit=' ', array=evtype[1]),
        pyfits.Column('EVENT_TYPE_PARTITION',
                      format='20A',
                      unit=' ',
                      array=evtype[2])
    ]
    evtype_hdu = pyfits.new_table(evtype_cols)
    evtype_hdu.name = "EVENT_TYPE_MAPPING"
    output.append(evtype_hdu)

    output.writeto(outfile, clobber=True)
Example #5
def add_columns(filename, colnames, newarrays, formats):
    # append new columns to an existing table
    oldfile = filename + ".OLD"
    # os.system() returns an exit status rather than raising, so check for a
    # stale backup explicitly before moving the file out of the way
    if os.path.exists(oldfile):
        os.remove(oldfile)
    os.rename(filename, oldfile)
    # write the new columns to a temporary table
    newcols = []
    for i in range(len(colnames)):
        newcols += [
            pyfits.Column(name=colnames[i],
                          format=formats[i],
                          array=newarrays[i])
        ]
    newcols = pyfits.ColDefs(newcols)
    tbhdu = pyfits.new_table(newcols)
    tbhdu.writeto('temp.fits', clobber=True)  # overwrite any stale temp file
    # Now read in the old table and merge
    h1 = pyfits.open(oldfile)
    h2 = pyfits.open('temp.fits')
    h = h1[1].columns + h2[1].columns  # merge the columns
    newhdu = pyfits.new_table(h)
    newhdu.writeto(filename)
    return 0
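
A hypothetical call to add_columns() above, appending a made-up redshift column to an existing catalog (one array entry per catalog row):

import numpy as np

z = np.random.uniform(0.0, 1.5, size=100)
add_columns('catalog.fits', ['Z'], [z], ['D'])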
Example #6
File: skymap.py  Project: balbinot/ugali
def writeSparseHealpixMap(pix, data_dict, nside, outfile,
                          distance_modulus_array = None,
                          coordsys = 'NULL', ordering = 'NULL',
                          header_dict = None):
    """
    Sparse HEALPix maps are used to efficiently store maps of the sky by only
    writing out the pixels that contain data.

    Three-dimensional data can be saved by supplying a distance modulus array
    which is stored in a separate extension.
    
    coordsys [gal, cel]
    ordering [ring, nest]
    """

    hdul = pyfits.HDUList()

    # Pixel data extension
    columns_array = [pyfits.Column(name = 'PIX',
                                   format = 'K',
                                   array = pix)]

    for key in data_dict.keys():
        if data_dict[key].shape[0] != len(pix):
            logger.warning('First dimension of column %s (%i) does not match number of pixels (%i).'%(key,
                                                                                                      data_dict[key].shape[0],
                                                                                                      len(pix)))
        
        if len(data_dict[key].shape) == 1:
            columns_array.append(pyfits.Column(name = key,
                                               format = 'E',
                                               array = data_dict[key]))
        elif len(data_dict[key].shape) == 2:
            columns_array.append(pyfits.Column(name = key,
                                               format = '%iE'%(data_dict[key].shape[1]),
                                               array = data_dict[key]))
        else:
            logger.warning('Unexpected number of data dimensions for column %s.'%(key))
    
    hdu_pix_data = pyfits.new_table(columns_array)
    hdu_pix_data.header.update('NSIDE', nside)
    hdu_pix_data.header.update('COORDSYS', coordsys.upper())
    hdu_pix_data.header.update('ORDERING', ordering.upper())
    hdu_pix_data.header.update(header_dict)
    hdu_pix_data.name = 'PIX_DATA'
    hdul.append(hdu_pix_data)

    # Distance modulus extension
    if distance_modulus_array is not None:
        hdu_distance_modulus = pyfits.new_table([pyfits.Column(name = 'DISTANCE_MODULUS',
                                                               format = 'E',
                                                               array = distance_modulus_array)])
        hdu_distance_modulus.name = 'DISTANCE_MODULUS'
        hdul.append(hdu_distance_modulus)

    hdul.writeto(outfile, clobber = True)
Example #7
def main():
   import glob
   import numpy as np
   import pyfits as pf
   import matplotlib.pyplot as plt

   dirlst = glob.glob('*cloud*')
   dirlst.sort()

   for dirname in dirlst:
      print dirname

      filelist = glob.glob(dirname+'/*.21')
      filelist.sort()

      hdu = pf.PrimaryHDU(np.arange(100))
      hdulist = pf.HDUList([hdu])

      for fname in filelist:
         logg, partsize, temperature, metal, lam, flam = getDataCloudy(fname)
         c1 = pf.Column(name='Wavelength',format='E',array=lam)
         c2 = pf.Column(name='Flux',format='E',array=flam)
         tbhdu = pf.new_table([c1,c2])
         tbhdu.header.update('LOGG',logg)
         tbhdu.header.update('TEMPERAT',temperature,'units K')
         tbhdu.header.update('PARTSIZE',partsize,'units microns')
         tbhdu.header.update('Z',metal)
         hdulist.append(tbhdu)
   
      hdulist.writeto(dirname+'.fits')

   dirlst = glob.glob('clr*')
   dirlst.sort()

   for dirname in dirlst:
      print dirname

      filelist = glob.glob(dirname+'/*.clr')
      filelist.sort()

      hdu = pf.PrimaryHDU(np.arange(100))
      hdulist = pf.HDUList([hdu])

      for fname in filelist:
         logg, temperature, eddy, metal, lam, flam = getDataClr(fname)
         c1 = pf.Column(name='Wavelength',format='E',array=lam)
         c2 = pf.Column(name='Flux',format='E',array=flam)
         tbhdu = pf.new_table([c1,c2])
         tbhdu.header.update('LOGG',logg)
         tbhdu.header.update('TEMPERAT',temperature,'units K')
         tbhdu.header.update('EDDYCOEF',eddy)
         tbhdu.header.update('Z',metal)
         hdulist.append(tbhdu)
   
      hdulist.writeto(dirname+'.fits')
Example #8
    def testCombineCats(self):

        normkeys = 'FLUX_APER1 FLUXERR_APER1 MAG_APER1 MAGERR_APER1 BLANK1 BLANK2'.split()
        mastercols = [pyfits.Column(name = k,
                                    format = 'E',
                                    array = numpy.ones(30)) \
                          for k in normkeys]
        mastercols[0] = pyfits.Column(name='FLUX_APER1',
                                      format='E',
                                      array=numpy.random.standard_normal(30))
        zerokeys = 'MaxVal BackGr'.split()
        for key in zerokeys:
            mastercols.append(
                pyfits.Column(name=key, format='E', array=numpy.zeros(30)))
        mastercols.append(
            pyfits.Column(name='IMAFLAGS_ISO',
                          format='J',
                          array=numpy.ones(30)))

        cats = [ldac.LDACCat(pyfits.new_table(pyfits.ColDefs(mastercols)))]

        for i in xrange(5):
            cols = [pyfits.Column(name = k,
                                  format = 'E',
                                  array = numpy.random.standard_normal(30)) \
                        for k in normkeys]
            cols[0] = pyfits.Column(name='FLUX_APER1',
                                    format='E',
                                    array=numpy.random.standard_normal(30))

            for key in zerokeys:
                cols.append(
                    pyfits.Column(name=key, format='E', array=numpy.zeros(30)))
            cols.append(
                pyfits.Column(name='IMAFLAGS_ISO',
                              format='E',
                              array=numpy.ones(30)))

            cats.append(ldac.LDACCat(pyfits.new_table(pyfits.ColDefs(cols))))

        keys = normkeys[2:] + zerokeys
        keys.append('IMAFLAGS_ISO')

        combinedcat = combineCats(cats, saturation=5)

        self.assertEqual(type(combinedcat), type(cats[0]))
        for key in keys:
            self.assertTrue(key in combinedcat.keys())
        self.assertTrue((combinedcat['BLANK1'] == 1).all())
        self.assertTrue((combinedcat['BLANK2'] == 1).all())
        self.assertTrue((combinedcat['MAG_APER1'] == 1).all())
        self.assertTrue((combinedcat['MAGERR_APER1'] == 1).all())
        self.assertTrue((combinedcat['FLUX_APER1-1'] != 1).all())
        self.assertTrue((combinedcat['FLUXERR_APER1-1'] != 1).all())
Example #9
def gen_UDFspec_fits(sourcefiles, outputdir, clobber=False, verbose=True):
    """

    Load the 1D spectra from a set of source files, and save the indivdual spectra as
    stand alone fits files (makes comparison plotting with MUSEWidePlots.plot_1DspecOverview() straight forward)

    --- EXAMPLE OF USE ---
    import MUSE_TDOSEvsUDF as mtu

    sourcefiles = ['/Volumes/DATABCKUP1/UDFvsMUSEWide/udf10_c042_e031_withz_iter6/udf_udf10_00004.fits','/Volumes/DATABCKUP1/UDFvsMUSEWide/udf10_c042_e031_withz_iter6/udf_udf10_00006.fits','/Volumes/DATABCKUP1/UDFvsMUSEWide/udf10_c042_e031_withz_iter6/udf_udf10_00533.fits']
    outputdir  = '/Volumes/DATABCKUP1/UDFvsMUSEWide/udf10_c042_e031_withz_iter6_1Dspecs/'

    mtu.gen_UDFspec_fits(sourcefiles,outputdir,clobber=False)

    """
    if not os.path.isdir(outputdir):
        sys.exit('Output directory specified (' + outputdir +
                 ') does not appear to exist')

    redshifts, specdic = mtu.load_sourcefile_spectra(sourcefiles,
                                                     verbose=verbose)

    if verbose:
        print(
            ' - Storing spectra extracted from source files to individual fits files'
        )

    for sourcefile in sourcefiles:
        for spec in specdic[sourcefile].keys():
            if spec.startswith('SPE_'):
                specarr = specdic[sourcefile][spec]
                fitsname = outputdir + sourcefile.split('/')[-1].replace(
                    '.fits', '_' + spec + '.fits')
                if verbose: print('   Generating ' + fitsname)
                # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                c1 = pyfits.Column(name='lambda',
                                   format='D',
                                   unit='ang',
                                   array=specarr['lambda'])
                c2 = pyfits.Column(name='flux',
                                   format='D',
                                   unit='1e-20 erg/ang/cm2/s',
                                   array=specarr['flux'])
                c3 = pyfits.Column(name='fluxerror',
                                   format='D',
                                   unit='1e-20 erg/ang/cm2/s',
                                   array=specarr['fluxerror'])

                coldefs = pyfits.ColDefs([c1, c2, c3])
                tbHDU = pyfits.new_table(coldefs)  # new_table already builds a default header
                tbHDU.writeto(fitsname, clobber=clobber)
Example #10
def add_bins(sname='final'):
    p = pyfits.open(data_path + 'gz2sample_%s_abs_regions.fits' % sname)
    d = p['data'].data
    redshift = d.field('redshift')
    zmask = notNaN(redshift)
    oldcols = p['data'].columns
    bincols = {}
    cols = []
    for c in oldcols:
        cols.append(
            pyfits.Column(name=c.name, format=c.format, array=d.field(c.name)))
    for k in bins.keys():
        x = d.field(k)[zmask]
        bin_min, bin_max, bin_step = bins[k]
        xbin = N.zeros(redshift.shape, N.int) - 9999
        xbinz = (N.floor((x - bin_min) / bin_step)).astype(N.int)
        maxbin = int(round((bin_max - bin_min) / bin_step))
        print k, maxbin
        low = xbinz < 0
        high = xbinz >= maxbin
        xbinz[low] = -999
        xbinz[high] = 999
        xbin[zmask] = xbinz
        name = ('%s_simple_bin' % k).upper()
        cols.append(pyfits.Column(name=name, format='I', array=xbin))
        bin = N.arange(0, maxbin, 1)
        min = bin * bin_step + bin_min
        max = min + bin_step
        center = min + 0.5 * bin_step
        bincols[k] = [
            pyfits.Column(name='bin', format='I', array=bin),
            pyfits.Column(name='min', format='E', array=min),
            pyfits.Column(name='max', format='E', array=max),
            pyfits.Column(name='centre', format='E', array=center)
        ]
    hdulist = pyfits.HDUList()
    hdulist.append(pyfits.PrimaryHDU())
    tbhdu = pyfits.new_table(cols)
    tbhdu.name = 'data'
    hdulist.append(tbhdu)
    for k in bincols.keys():
        c = bincols[k]
        tbhdu = pyfits.new_table(c)
        tbhdu.name = '%s_simple_bins' % k
        hdulist.append(tbhdu)
        outfile = data_path + 'gz2sample_%s_abs_regions_bins.fits' % sname
    file_exists = os.path.isfile(outfile)
    if file_exists:
        os.remove(outfile)
    hdulist.writeto(outfile)
    p.close()
Example #12
    def _defineVariables(self):
        """
        Helper funtion to define pertinent variables from catalog data.
        """
        self.objid = self.data.field(self.config['catalog']['objid_field'])
        self.lon = self.data.field(self.config['catalog']['lon_field'])
        self.lat = self.data.field(self.config['catalog']['lat_field'])

        #if self.config['catalog']['coordsys'].lower() == 'cel' \
        #   and self.config['coords']['coordsys'].lower() == 'gal':
        #    logger.info('Converting catalog objects from CELESTIAL to GALACTIC coordinates')
        #    self.lon, self.lat = ugali.utils.projector.celToGal(self.lon, self.lat)
        #elif self.config['catalog']['coordsys'].lower() == 'gal' \
        #   and self.config['coords']['coordsys'].lower() == 'cel':
        #    logger.info('Converting catalog objects from GALACTIC to CELESTIAL coordinates')
        #    self.lon, self.lat = ugali.utils.projector.galToCel(self.lon, self.lat)

        self.mag_1 = self.data.field(self.config['catalog']['mag_1_field'])
        self.mag_err_1 = self.data.field(self.config['catalog']['mag_err_1_field'])
        self.mag_2 = self.data.field(self.config['catalog']['mag_2_field'])
        self.mag_err_2 = self.data.field(self.config['catalog']['mag_err_2_field'])

        if self.config['catalog']['mc_source_id_field'] is not None:
            if self.config['catalog']['mc_source_id_field'] in self.data.names:
                self.mc_source_id = self.data.field(self.config['catalog']['mc_source_id_field'])
                logger.info('Found %i MC source objects'%(numpy.sum(self.mc_source_id > 0)))
            else:
                #ADW: This is pretty kludgy, please fix... (FIXME)
                columns_array = [pyfits.Column(name = self.config['catalog']['mc_source_id_field'],
                                               format = 'I',
                                               array = numpy.zeros(len(self.data)))]
                hdu = pyfits.new_table(columns_array)
                self.data = pyfits.new_table(pyfits.new_table(self.data.view(np.recarray)).columns + hdu.columns).data
                self.mc_source_id = self.data.field(self.config['catalog']['mc_source_id_field'])

        # should be @property
        if self.config['catalog']['band_1_detection']:
            self.mag = self.mag_1
            self.mag_err = self.mag_err_1
        else:
            self.mag = self.mag_2
            self.mag_err = self.mag_err_2
            
        # should be @property
        self.color = self.mag_1 - self.mag_2
        self.color_err = numpy.sqrt(self.mag_err_1**2 + self.mag_err_2**2)

        logger.info('Catalog contains %i objects'%(len(self.data)))
Example #13
    def testSaveOffsetsforSLR(self):

        zplist = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs([pyfits.Column(name = 'filter', format='20A', 
                                                                             array = self.filternames),
                                                               pyfits.Column(name = 'zeropoints', format='E', 
                                                                             array = self.orig_zps)])))

        saveSlrZP(cluster = 'testcluster', offsetFile = self.offsetFile, 
                  zplist = zplist, fluxtype = 'iso', myspec = 'custom',
                  photometry_db = self.db)

        self.assertEquals(len(self.db.slr), 3)

        self.assertEquals(sorted([slr.fitFilter for slr in self.db.slr]), sorted(self.slr_zps.keys()))

        for slr in self.db.slr:

            match = self.slr_zps[slr.fitFilter]
        
            self.assertEquals(slr.cluster, 'testcluster')
            self.assertEquals(slr.fitFilter, match.filter)
            self.assertTrue(np.abs(slr.zp - match.zp) < 0.001)
            self.assertTrue(np.abs(slr.zperr - match.zperr) < 0.001)
            self.assertEquals(slr.fluxtype, 'iso')
            self.assertEquals(slr.myspec, 'custom')
Example #14
def hitrandom(n,NS,FS,scaledata,bp):
	
	#Randomly creates 1 or 2 hits per time step
	n = np.random.randint(1,3)
	detpow = np.random.uniform(20,100,n) 
	meanpow = np.random.uniform(1,20,n)
	
	bzero3 = scaledata[0]
	bscale3 = scaledata[1]
	bzero4 = scaledata[2]
	bscale4 = scaledata[3]

	corchan = np.int16(np.array([i-bzero3 for i in np.random.randint(1,NS,size=n)]))
	finchan = np.int32(np.array([i-bzero4 for i in np.random.randint(1,FS,size=n)]))

	#ET Signal in a given beam
	beam = [0,1,2]
	if(bp in beam):	
		detpow = np.append(detpow,1200.0)
		detpow = np.append(detpow,1300.0)
		meanpow = np.append(meanpow,10.0)
		meanpow = np.append(meanpow,10.0)
		corchan = np.append(corchan,np.int16(125-bzero3))
		corchan = np.append(corchan,np.int16(225-bzero3))
		finchan= np.append(finchan,np.int32(0-bzero4))
		finchan= np.append(finchan,np.int32(0-bzero4))

	c1 = pyfits.Column(name='DETPOW',format='1E',array=detpow)
	c2 = pyfits.Column(name='MEANPOW',format='1E',array=meanpow)
	#c3 = pyfits.Column(name='COARCHAN',format='1I',array=corchan,bzero=bzero3,bscale=bscale3)
	c3 = pyfits.Column(name='COARCHAN',format='1I',array=corchan)
	#c4 = pyfits.Column(name='FINECHAN',format='1J',array=finchan,bzero=bzero4,bscale=bscale4)
	c4 = pyfits.Column(name='FINECHAN',format='1J',array=finchan)
	tbhdu = pyfits.new_table([c1, c2, c3, c4])
	return  tbhdu
Example #15
def write_cmd_file(near_targ, target):
    '''
    Takes the rec array of sources near target and the rec array of the target
    and produces a fits table.
    '''

    # Columns to be in the fits table: these data are for the nearby sources
    c1 = pyfits.Column(name='HSTID', format='20A', array=near_targ['hstid'])
    c2 = pyfits.Column(name='RA', format='F', array=near_targ['degra'])
    c3 = pyfits.Column(name='DEC', format='F', array=near_targ['degdec'])
    c4 = pyfits.Column(name='V', format='F', array=near_targ['v'])
    c5 = pyfits.Column(name='VERR', format='F', array=near_targ['verr'])
    c6 = pyfits.Column(name='BV', format='F', array=near_targ['bvcol'])
    c7 = pyfits.Column(name='BVERR', format='F', array=near_targ['bvcolerr'])
    c8 = pyfits.Column(name='VI', format='F', array=near_targ['vicol'])
    c9 = pyfits.Column(name='VIERR', format='F', array=near_targ['vicolerr'])

    # Make table
    table_hdu = pyfits.new_table([c1, c2, c3, c4, c5, c6, c7, c8, c9])

    # Updates header with contains the target's info
    table_hdu.header.update(key='HSTID', value=target['hstid'])
    table_hdu.header.update(key='LBTID', value=target['lbtid'])
    table_hdu.header.update(key='RA', value=str(target['ra']))
    table_hdu.header.update(key='DEC', value=str(target['dec']))

    # Table data cannot be the Primary HDU, so we make an empty Primary HDU
    phdu = pyfits.PrimaryHDU()

    # Zeroth extension is empty, first extension contains the table
    hdulist = pyfits.HDUList([phdu, table_hdu])
    hdulist.writeto(target['lbtid'] + '.fits')
Example #16
File: iofits4.py  Project: bnikolic/oof
def Combine( flist , fout,
             overwrite=0):
    """Combines columns from various fits files.

    flist needs to be of format ( (fname, col-prefix), ... )
    """

    tabins= [ (pyfits.open( x[0] )[1], x[1]) for x in flist ]

    coldefs = []
    for tab,prefix in tabins:
        tabcds=tab.columns
        print tabcds.formats
        coldefs.extend( CopyColDefs( tab, prefix))

        
    tabout= pyfits.new_table( coldefs , nrows=len(tabins[0][0].data))

    for tab,prefix in tabins:
        tabcds=tab.columns
        for cname in tabcds.names:
            tabout.data.field(prefix+cname)._copyFrom(tab.data.field(cname))

    Write([pyfits.PrimaryHDU(), tabout],
          fout,
          overwrite=overwrite)
Example #17
  def save(self, pathname=None, sim_num=0, file_ext='fits', planet=None):
    if not pathname: pathname = '.'
    full_path = os.path.expanduser(os.path.join(pathname, str(sim_num), 'static'))
    try:
      os.makedirs(full_path)
    except os.error:
      pass
    
    filename = os.path.join(full_path, self.opt.name)
   
    if file_ext == 'fits':
      prihdr = pyfits.Header()
      prihdr['wavsol_0'] = (self.opt.ld().base[0], 'reference pixel wl')
      prihdr['wavsol_1'] = (self.opt.ld().base[1], '')
      prihdr['wavsol_2'] = (self.opt.ld().base[2], 'reference pixel')
      prihdr['BUNITS']   = "{:>18s}".format(str(self.fp.units))
      if planet:
        prihdr['NAME'] = ("{:>18s}".format(planet.planet.name), '')
        prihdr['T14'] = (float(planet.t14), str(planet.t14.units))
        prihdr['PERIOD'] = (float(planet.planet.P),
                            str(planet.planet.P.units))

      fp_hdu = pyfits.PrimaryHDU(self.fp, header=prihdr)
      tb_hdu = pyfits.new_table(pyfits.ColDefs([
          pyfits.Column(name='wl', format='E', array=self.wl_solution),
          pyfits.Column(name='cr', format='E', array=self.planet.sed),
          pyfits.Column(name='star', format='E', array=self.star.sed)]))

      hdulist = pyfits.HDUList([fp_hdu, tb_hdu])
      hdulist.writeto(filename + '.' + file_ext, clobber=True)
    else:
      exolib.exosim_error('channel.save - file format not supported')
Example #18
def convertCosmos(inName, outName):
    inFile = open(inName, "r")
    table = asciitable.read(inFile, Reader=asciitable.FixedWidthTwoLine, delimiter='|', header_start=0,
                            data_start=4, data_end=-1)

    schema = pyfits.ColDefs([column for column in MAPPING.values()])
    outHdu = pyfits.new_table(schema, nrows=len(table))
    outData = outHdu.data

    for name, column in MAPPING.items():
        outData.field(column.name)[:] = table.field(name)

    for f in FILTERS:
        mag = outData.field(f)
        err = outData.field(f + "_err")
        indices = numpy.where(numpy.logical_or(mag < 0, mag > 50))
        mag[indices] = numpy.NAN
        err[indices] = numpy.NAN

    outHdu.writeto(outName, clobber=True)
    print "Wrote %s" % outName
    print "To create an astrometry.net catalogue, execute:"
    outBase = outName.replace(".fits", "")
    print "build-index -i %s -o %s_and_0.fits -I 77770 -P0 -n 100 -S r -L 20 -E -M -j 0.4" % (inName, outBase)
    for i in range(1, 5):
        print "build-index -1 %s_and_0.fits -o %s_and_%d.fits -I 7777%d -P%d -n 100 -S r -L 10 -E -M -j 0.4 &" % (outBase, outBase, i, i, i)
Example #19
def run_treecorr(x, y, g1, g2):
    """Helper routine to take outputs of GalSim shear grid routine, and run treecorr on it."""
    import pyfits
    import os
    import treecorr
    # Use fits binary table for faster I/O.
    assert x.shape == y.shape
    assert x.shape == g1.shape
    assert x.shape == g2.shape
    x_col = pyfits.Column(name='x', format='1D', array=x.flatten() )
    y_col = pyfits.Column(name='y', format='1D', array=y.flatten() )
    g1_col = pyfits.Column(name='g1', format='1D', array=g1.flatten() )
    g2_col = pyfits.Column(name='g2', format='1D', array=g2.flatten() )
    cols = pyfits.ColDefs([x_col, y_col, g1_col, g2_col])
    table = pyfits.new_table(cols)
    phdu = pyfits.PrimaryHDU()
    hdus = pyfits.HDUList([phdu,table])
    hdus.writeto('temp.fits',clobber=True)
    # Define the treecorr catalog object.
    cat = treecorr.Catalog('temp.fits',x_units='degrees',y_units='degrees',
                           x_col='x',y_col='y',g1_col='g1',g2_col='g2')
    # Define the corrfunc object
    gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, bin_size=0.1, sep_units='degrees')
    # Actually calculate the correlation function.
    gg.process(cat)
    os.remove('temp.fits')
    return gg
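
A hypothetical driver for run_treecorr() above; note that min_sep and max_sep are referenced but never defined inside the helper, so they are assumed to be module-level globals:

import numpy as np

min_sep, max_sep = 0.1, 10.0   # degrees, assumed module-level globals
x, y = np.meshgrid(np.arange(10.), np.arange(10.))
g1 = 0.01 * np.random.randn(*x.shape)
g2 = 0.01 * np.random.randn(*x.shape)
gg = run_treecorr(x, y, g1, g2)
print(gg.xip[:3])   # xi_+ in the first few separation bins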
Example #20
def seeingtable(sdb, mintime, maxtime):
   #extract the seeing data from the sdb
   sel_cmd='DateTime, Mass, Dimm'
   tab_cmd='MassDimm'
   log_cmd="DateTime>'%s' and DateTime<'%s'" % (mintime, maxtime)
   see_rec=saltmysql.select(sdb, sel_cmd, tab_cmd, log_cmd)
   if len(see_rec)<2:  return None

   stime_list=[]
   mass_arr=np.zeros(len(see_rec))
   dimm_arr=np.zeros(len(see_rec))
   for i in range(len(see_rec)):
       stime_list.append(see_rec[i][0])
       mass_arr[i]=see_rec[i][1]
       dimm_arr[i]=see_rec[i][2]
 
   seecol=[]
   seecol.append(pyfits.Column(name='Timestamp', format='20A', array=stime_list))
   seecol.append(pyfits.Column(name='MASS', format='F', array=mass_arr ))
   seecol.append(pyfits.Column(name='DIMM', format='F', array=dimm_arr ))

   seetab= saltio.fitscolumns(seecol)
   seehdu= pyfits.new_table(seetab)
   seehdu.name='Seeing'
   return seehdu
Example #21
def guidertable(els, mintime, maxtime):
   """Extract the guider data from the els"""
   #extract the guider data from the els
   sel_cmd='_timestamp_, guidance_available, ee50, mag50'
   tab_cmd='tpc_guidance_status__timestamp '
   log_cmd="_timestamp_>'%s' and _timestamp_<'%s'" % (mintime, maxtime)
   gui_rec=saltmysql.select(els, sel_cmd, tab_cmd, log_cmd)
   if len(gui_rec)<2:  return None


   gtime_list=[]
   ee50_arr=np.zeros(len(gui_rec))
   mag50_arr=np.zeros(len(gui_rec))
   avail_list=[]
   for i in range(len(gui_rec)):
       gtime_list.append(gui_rec[i][0])
       ee50_arr[i]=gui_rec[i][2]
       mag50_arr[i]=gui_rec[i][3]
       avail_list.append(gui_rec[i][1])
   avail_arr=(np.array(avail_list)=='T')

   #write the results to a fits table
   guicol=[]
   guicol.append(pyfits.Column(name='Timestamp', format='20A', array=gtime_list))
   guicol.append(pyfits.Column(name='Available', format='L', array=avail_arr ))
   guicol.append(pyfits.Column(name='EE50', format='F', array=ee50_arr ))
   guicol.append(pyfits.Column(name='mag50', format='F', array=mag50_arr ))

   guitab= saltio.fitscolumns(guicol)
   guihdu= pyfits.new_table(guitab)
   guihdu.name='Guider'
   return guihdu
Example #22
def writeSpotFITS(spotDir, data):

    phdu = pyfits.PrimaryHDU()
    phdr = phdu.header
    phdr.update('pixscale', 0.001, 'mm/pixel')

    cols = []
    cols.append(pyfits.Column(name='fiberIdx',
                              format='I',
                              array=data['fiberIdx']))
    cols.append(pyfits.Column(name='wavelength',
                              format='D',
                              array=data['wavelength']))
    cols.append(pyfits.Column(name='spot_xc',
                              format='D',
                              array=data['spot_xc']))
    cols.append(pyfits.Column(name='spot_yc',
                              format='D',
                              array=data['spot_yc']))
    spots = data['spot'][:]
    spots.shape = (len(spots), 256*256)
    cols.append(pyfits.Column(name='spot',
                              format='%dE' % (256*256),
                              dim='(256,256)',
                              array=spots))
    colDefs = pyfits.ColDefs(cols)

    thdu = pyfits.new_table(colDefs)
    hdulist = pyfits.HDUList([phdu, thdu])

    hdulist.writeto(os.path.join(spotDir, 'spots.fits'), 
                    checksum=True, clobber=True)
Example #23
  def superaverage(self, weight='tintsys'):
    """
    Average together all the scans in this supertable.

    Note
    ====
    There is also a module function with this name which takes as its
    arguments a sequence of supertables

    @param weight : str::
      same as the ASAP argument for 'average_time'
    """
    mylogger.debug("Calling average_time for scan table")
    new_scantable = self.average_time(weight=weight)
    mylogger.debug("New scantable is %s", str(new_scantable))
    if new_scantable:
      new_head = self.header
      new_cols = self.cols
      new_hdu = pyfits.new_table(new_cols, header=new_head, nrows=1)
      # assume that the scan header info for the average is mostly the
      # same as for the first scan being averaged
      ASAP_scan_numbers = list(self.getscannos())
      mylogger.debug("ASAP scans: %s", str(ASAP_scan_numbers))
      for column_index in range(len(new_cols)):
        mylogger.debug("Processing column %d", column_index)
        new_hdu.data.field(column_index)[0] = \
           self.data.field(column_index)[0]
      # some data have changed in the averaging
      new_hdu.data.field('EXPOSURE')[0] = new_scantable.get_inttime(row=0)
      new_hdu.data.field('TSYS')[0]  = new_scantable.get_tsys()[0]
      new_hdu.data.field('DATA')[0] = array(new_scantable[0])
      return supertable(new_scantable, HDU=new_hdu)
    else:
      return None
Example #24
def ascii2fits(asciifile,asciinames=True,skip_header=0,outpath=None,fitsformat='D',verbose=True):
    """
    Convert ascii file into fits (see fits2ascii for the reverse command)

    --- INPUT ---
    asciifile        Ascii file to convert
    asciinames       Do the ascii file contain the column names in the header?
    skip_header      The number of header lines to skip when reading the ascii file.
    outpath          Alternative destination for the resulting fits file.

    --- EXAMPLE OF USE ---
    import fits2ascii as f2a
    outpath = '/Users/kschmidt/work/catalogs/'
    catfile = '/Users/kschmidt/work/GitHub/GLASS/ROMAN_CATALOGS/A2744/A2744_CLUSTER.cat'
    outputfile = f2a.ascii2fits(catfile,asciinames=True,skip_header=0,outpath=outpath,verbose=True)

    """
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print ' - Reading ascii file ',asciifile
    data    = np.genfromtxt(asciifile,names=asciinames,skip_header=skip_header,comments='#',dtype=None)
    keys    = data.dtype.names
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print ' - Initialize and fill dictionary with data'
    datadic = {}
    for kk in keys:
        datadic[kk] = []
        try:
            lenarr = len(np.asarray(data[kk]))
            datadic[kk] = np.asarray(data[kk])
        except: # if only one row of data is to be written
            datadic[kk] = np.asarray([data[kk]])

    if verbose: print ' - found the columns '+','.join(keys)

    if len(fitsformat) != len(keys):
        fitsformat = np.asarray([fitsformat]*len(keys))
    #-------------------------------------------------------------------------------------------------------------
    # writing to fits table
    tail = asciifile.split('.')[-1] # remove extension
    outputfile = asciifile.replace('.'+tail,'.fits')
    if outpath is not None:
        outputfile = outpath+outputfile.split('/')[-1]

    columndefs = []
    for kk, key in enumerate(keys):
        try:
            columndefs.append(pyfits.Column(name=key  , format=fitsformat[kk], array=datadic[key]))
        except:
            print ' ----ERROR---- in defining columns for fits file --> stopping with pdb.set_trace() to invest'
            pdb.set_trace()


    cols     = pyfits.ColDefs(columndefs)
    tbhdu    = pyfits.new_table(cols)          # creating table header
    hdu      = pyfits.PrimaryHDU()             # creating primary (minimal) header
    thdulist = pyfits.HDUList([hdu, tbhdu])    # combine primary and table header to hdulist
    thdulist.writeto(outputfile,clobber=True)  # write fits file (clobber=True overwrites existing file)
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print ' - Wrote the data to: ',outputfile
    return outputfile
Example #25
def createFitsTable(pathPriorFile, pathSaveFile):
    c1 = pyfits.Column(name='x', format='D')
    c2 = pyfits.Column(name='y', format='D')
    c3 = pyfits.Column(name='magnitude', format='D')
    col_definitions = pyfits.ColDefs([c1, c2, c3])

    x, y, m, r, f = np.loadtxt(pathPriorFile,
                               usecols=(0, 1, 2, 3, 6),
                               unpack=True)

    sel = np.invert(((f == 0) & (r == 0)) | (m > 24.5))

    xsel = x[sel]
    ysel = y[sel]
    msel = m[sel]

    image_hdu = pyfits.new_table(col_definitions, nrows=len(xsel))

    for index, (value1, value2, value3) in enumerate(zip(xsel, ysel, msel)):
        image_hdu.data[index] = [value1, value2, value3]

    # silence pyfits chatter while writing
    sys.stdout = open(os.devnull, "w")
    image_hdu.writeto(pathSaveFile, clobber=True)
    sys.stdout = sys.__stdout__
Example #26
def cl2fits(cl, filename, lcut):
    """cl2fits(cl, filename, lcut)"""

    table=[pyf.Column(name='TEMPERATURE',format='1D',array=cl[0:lcut+1])]
    tbhdu=pyf.new_table(table)
    tbhdu.writeto(filename, clobber=True)
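
A hypothetical call, writing the first lcut+1 multipoles of a made-up power spectrum array:

import numpy as np

cl = np.arange(1025, dtype=np.float64)   # placeholder C_l values
cl2fits(cl, 'cl.fits', lcut=64)          # keeps multipoles 0..64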
Example #27
def select_columns(tbhdu, *fieldnames): 
    """
    Select particular columns from given table
    
    A new table with only the asked columns ('fieldnames')
    is output.

    Input:
     - tbhdu : pyfits.open('data.fit')[?]
        Table HDU, often "?" equals 1
     - fieldnames : str,
        Field (column) names to be read from 'tbhdu'

    Output:
     -> (new) BinTableHDU, with just the selected fields
    
    ---
    """
	
    coldefs = tbhdu.columns;
    tbdata = tbhdu.data;
    inds = [ tbdata.names.index(id.upper()) for id in fieldnames ];
    cols = [];
    for i in inds:
        cols.append( pyfits.Column(name=coldefs[i].name, format=coldefs[i].format, array=tbdata.field(i)) );
    coldefs = pyfits.ColDefs(cols);
    
    return pyfits.new_table(coldefs);
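
Hypothetical usage, keeping only two columns of the first table extension (file and column names are illustrative; names are matched case-insensitively):

import pyfits

tbhdu = pyfits.open('data.fit')[1]
slim = select_columns(tbhdu, 'ra', 'dec')
slim.writeto('data_radec.fit', clobber=True)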
Example #28
 def to_fits(self, filename, format=None, clobber=False, history=''):
     if format is None:
         format = healpix.default_fits_format_codes[self.get_dtype().type]
     hdu0 = pyfits.PrimaryHDU()
     col0 = pyfits.Column(name='signal', format=format, array=self.map.map)
     col1 = pyfits.Column(name='weights', format=format, array=self.wgt.map)
     col_inds = [
         pyfits.Column(name='sp_index%d' % n, format=format, array=i.map)
         for n, i in enumerate(self.ind)
     ]
     cols = pyfits.ColDefs([col0, col1] + col_inds)
     tbhdu = pyfits.new_table(cols)
     self.map._set_fits_header(tbhdu.header)
     hdulist = pyfits.HDUList([hdu0, tbhdu])
     if history != '':
         history = [h.strip() for h in history.split("\n")]
         for line in history:
             if len(line) > 1:
                 if line.startswith('#'):
                     for subline in img.word_wrap(line, 80, 0, 0,
                                                  '').split("\n"):
                         hdulist[0].header.add_history(subline)
                 else:
                     for subline in img.word_wrap(line, 70, 5, 10,
                                                  '#').split("\n"):
                         hdulist[0].header.add_history(subline)
     hdulist.writeto(filename, clobber=clobber)
Example #29
def extend_tbHDU(tbhdu_A, tbhdu_B):
    """
    Extend first tbHDU with second entries

    The output is a table (HDU) with column 'B' lines
    extending 'A' entries. Column names in both input
    tables need to be the same.
    
    Input:
     - tbhdu_A : FITS binary table HDU
     - tbhdu_B : FITS binary table HDU
    
    Output:
     - tbhdu : FITS binary table HDU
        Result from extension, A+B
    
    ---
    """
    
    Nrows_A = tbhdu_A.header['NAXIS2']
    Nrows_B = tbhdu_B.header['NAXIS2']
    Nrows = Nrows_A + Nrows_B

    new_tb = pyfits.new_table(tbhdu_A.columns, nrows=Nrows)

    for name in tbhdu_A.columns.names:
        new_tb.data.field(name)[Nrows_A:] = tbhdu_B.data.field(name)

    return new_tb
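
Hypothetical usage, concatenating two catalogs that share the same column layout (file names made up for illustration):

import pyfits

tb_A = pyfits.open('cat_A.fits')[1]
tb_B = pyfits.open('cat_B.fits')[1]
both = extend_tbHDU(tb_A, tb_B)   # rows of B appended after rows of A
both.writeto('cat_AB.fits', clobber=True)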
Example #30
  def new_HDU(self,scan_list):
    """
    Make a new HDU from the ASAP scans in 'scan_list'

    @param scan_list : list of ints::
      the scans numbers used to select scans from the scantable

    @return:
      an SDFITS bintable HDU
    """
    # The header and column definitions stay the same
    new_head = self.header
    new_cols = self.cols
    num_rows = len(scan_list)
    new_hdu = pyfits.new_table(new_cols, header=new_head, nrows=num_rows)
    # Make sure that the new scan list is ordered
    scan_list.sort()
    old_scan_list = list(self.getscannos())
    new_row_index = 0
    for scan in scan_list:
      old_scan_index = old_scan_list.index(scan)
      for i in range(len(new_cols)):
        new_hdu.data.field(i)[new_row_index] = \
          self.data.field(i)[old_scan_index]
      new_row_index += 1
    return new_hdu
Example #31
def write_fits(magpat, fits_output_file):
    """Save a magnification pattern to a FITS file.

    The pattern itself is saved in the primary HDU of the FITS file.
    The coordinates of the source plane rectangle occupied by the
    pattern are stored in the header fields

        MAGPATX0, MAGPATY0, MAGPATX1, MAGPATY1

    The lens list is stored in a binary table HDU named "LENSES".

    Parameters:

        magpat           magnification pattern to save
        fits_output_file
                         file name of the output file
    """
    img_hdu = pyfits.PrimaryHDU(magpat)
    region = magpat.region
    img_hdu.header.update("ctype1", " ")
    img_hdu.header.update("crpix1", 0.5)
    img_hdu.header.update("crval1", region.x)
    img_hdu.header.update("cdelt1", region.width / magpat.params.xpixels)
    img_hdu.header.update("ctype2", " ")
    img_hdu.header.update("crpix2", 0.5)
    img_hdu.header.update("crval2", region.y)
    img_hdu.header.update("cdelt2", region.height / magpat.params.ypixels)
    for s in ["x0", "y0", "x1", "y1"]:
        img_hdu.header.update("magpat" + s, getattr(region, s))
    lens_hdu = pyfits.new_table(magpat.lenses)
    lens_hdu.name = "lenses"
    pyfits.HDUList([img_hdu, lens_hdu]).writeto(fits_output_file, clobber=True)
    utils.logger.info("Wrote magnification pattern to %s", fits_output_file)
Example #32
def mwrfits(filename, data, hdu=1, colnames=None, keys=None):
    """Write columns to a fits file in a table extension.

    Input:
      - filename: the fits file name
      - data: a list of 1D arrays to write in the table
    Parameters:
      - hdu: header where to write the data. Default: 1
      - colnames: the column names
      - keys: a dictionary with keywords to write in the header
    """
    # Check the inputs
    if colnames is not None:
        if len(colnames) != len(data):
            raise ValueError("colnames and data must the same length")
    else:
        colnames = [""] * len(data)
    cols = []
    for line in xrange(len(data)):
        cols.append(pyf.Column(name=colnames[line], format=getformat(data[line]), array=data[line]))
    coldefs = pyf.ColDefs(cols)
    tbhdu = pyf.new_table(coldefs)
    if type(keys) is dict:
        for k, v in keys.items():
            tbhdu.header.update(k, v)
    # write the file
    tbhdu.writeto(filename, clobber=True)
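
A hypothetical call, assuming the getformat() helper used above maps numpy dtypes to FITS format codes:

import numpy as np

x = np.linspace(0., 1., 50)
y = x**2
mwrfits('xy.fits', [x, y], colnames=['X', 'Y'], keys={'ORIGIN': 'demo'})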
Example #33
File: iofits4.py  Project: bnikolic/oof
def Select(dirin, filein , selfn, dirout,
           overwrite=False):
    "Select a subsample from ma table "

    """
    If overwrite is False, will skip existing files in destination
    directory.
    """

    mask=[ selfn(row) for row in pyfits.open(filein)[1].data ]
    mind=[ x for x,f in  enumerate (mask) if f]
    nrows=len(mind)

    print "Selected %i rows." % nrows
    
    fitsel=re.compile(".*fits?")
    flist = [ fnamein for fnamein in os.listdir(dirin) if fitsel.match(fnamein) ]
    
    for fnamein in flist:
        foutname=os.path.join(dirout,fnamein)

        if os.access(foutname, os.F_OK) and (not overwrite):
            print "Skipping %s as it already exists" % fnamein
        else:
            fin=pyfits.open(os.path.join(dirin,fnamein))

            newtab=pyfits.new_table( fin[1].columns , nrows= nrows)
            for cname in fin[1].columns.names:
                newtab.data.field(cname)._copyFrom( fin[1].data.field(cname)[ mind] )

        
            Write([pyfits.PrimaryHDU(), newtab],
                  foutname,
                  overwrite=1)
Example #34
def subsetSchlafly(inName, outName):
    inFile = pyfits.open(inName)
    inData = inFile[1].data

    schema = pyfits.ColDefs([pyfits.Column(name="id", format="K"),
                             pyfits.Column(name="ra", format="D"),
                             pyfits.Column(name="dec", format="D")] +
                            [pyfits.Column(name=name, format="E") for name in FILTERS] +
                            [pyfits.Column(name=name + "_err", format="E") for name in FILTERS] +
                            [pyfits.Column(name=name + "_stdev", format="E") for name in FILTERS]
                            )

    outHdu = pyfits.new_table(schema, nrows=len(inData))
    outData = outHdu.data

    outData.field("id")[:] = inData.obj_id
    outData.ra = inData.ra
    outData.dec = inData.dec
    for i, f in enumerate(FILTERS):
        # Some of the below (e.g., "mean") are functions in the pyfits.FITS_rec class,
        # so we need to access them differently than just grabbing an attribute.
        mean = outData.field(f)
        err = outData.field(f + "_err")
        stdev = outData.field(f + "_stdev")

        mean[:] = inData.field("mean")[:,i]
        err[:] = inData.field("err")[:,i]
        stdev[:] = inData.field("stdev")[:,i]

    outHdu.writeto(outName, clobber=True)
    print "Wrote %s" % outName
    inFile.close()
Example #35
File: iofits4.py  Project: bnikolic/oof
def RepTable ( tabin ,
               rowmask=None ,
               colsel=None,
               keycopylist = ["dz"]
               ):

    "Replicate a fits table"


    
    if rowmask ==None:
        rowmask = numpy.ones(len(tabin.data),
                             numpy.bool)

    nrowsout=sum(rowmask)

    
    tabout=pyfits.new_table( CopyColDefs(tabin),
                             nrows=nrowsout)

    for k in keycopylist:
        if k in tabin.header.keys():
            tabout.header.update ( k , tabin.header[k] )

    if colsel is None:
        colsel = tabin.columns.names

    if nrowsout > 0 :
        for cname in colsel:
            tabout.data[cname]=tabin.data.field(cname)[ rowmask]

    return tabout
Example #36
File: search.py  Project: norashipp/ugali
    def writeCandidates(self, filename=None):
        if filename is None: filename = self.candfile

        threshold = self.config['search']['cand_threshold']
        select = (self.assocs['CUT'] == 0)
        select &= (self.assocs['TS'] > threshold)
        #select &= (self.assocs['ASSOC2']=='')

        self.candidates = self.assocs[select]
        # ADW: View as a recarray or selection doesn't work.
        # Why? I don't know, and I'm slightly terrified...
        hdu = pyfits.new_table(self.candidates.view(np.recarray))
        logger.info("Writing %s..." % filename)
        hdu.writeto(filename, clobber=True)

        # Dump to txt file
        if which('fdump'):
            txtfile = filename.replace('.fits', '.txt')
            columns = ['NAME', 'TS', 'GLON', 'GLAT', 'DISTANCE', 'MASS']
            cmd = 'fdump %(infile)s %(outfile)s columns="%(columns)s" rows="-" prhead="no" showcol="yes" clobber="yes" pagewidth="256" fldsep=" " showrow="no"' % (
                dict(infile=filename,
                     outfile=txtfile,
                     columns=','.join(columns)))
            print cmd
            subprocess.call(cmd, shell=True)
Example #37
    def parsePDZ(cls, pdzfile, sourcefile):
        '''parses text output from ZEBRA'''

        input = open(pdzfile)

        source = ldac.openObjectFile(sourcefile)

        minPDZ, maxPDZ, pdzstep = 0.0, 4.005, 0.005

        pdzs = []
        for line in input.readlines():
            if re.match('^#', line):
                continue
            tokens = line.split()
            pdz = map(float, tokens)

            pdzs.append(pdz)

        nobjects = len(pdzs)
        npdzs = len(np.arange(minPDZ, maxPDZ, pdzstep))

        ids = source['SeqNr']

        cols = [pyfits.Column(name = 'SeqNr', format = 'J', array = ids),
                pyfits.Column(name = 'pdz', format = '%dE' % npdzs, array = np.array(pdzs))]

        pdzs = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs(cols)))

        pdzs.hdu.header.update('MINPDZ', minPDZ)
        pdzs.hdu.header.update('MAXPDZ', maxPDZ)
        pdzs.hdu.header.update('PDZSTEP', pdzstep)

        
        return cls(pdzs)
Example #38
def sex2fits(c, fitsname, booleancols=[]):
    """	usage: sex2fits(c, fitsname, booleancols=[])
	c -- input sextutils.sextractor catalog instance
	fitsname -- output file name of the binary FITS table
	booleancols -- column names that should be converted to boolean values"""
    fitscols = []
    # construct all the columns
    for i in range(len(c._d)):
        colname = c._colnames[i]
        coltype = c._type[colname]
        colfmt = colformats[coltype]
        if coltype == 's':
            slen = c._fmt[colname][1:-1]  # length of string
            colfmt = slen + colfmt
        colarray = c.__getattribute__(colname)
        # catch the Boolean array of 0 or 1
        if colname in booleancols:
            colfmt = 'L'
            colarray = where(colarray == 0, False,
                             True)  # convert to boolean array
        col = pyfits.Column(name=colname, format=colfmt, array=colarray)
        fitscols += [col]
    # create table header unit
    cols = pyfits.ColDefs(fitscols)
    tbhdu = pyfits.new_table(cols)
    hdu = pyfits.PrimaryHDU(array(
        []))  # create a primary HDU with an empty list
    thdulist = pyfits.HDUList([hdu, tbhdu])
    thdulist.writeto(fitsname)
    return 0
Example #39
def saveClass(out,filename):
    """
    NAME:
       saveClass
    PURPOSE:
       save the classifications
    INPUT:
       out - list of varClass objects (key, qsologlike, starloglike)
       filename - name of the file that the output will be saved to
    OUTPUT:
       (none)
    HISTORY:
       2011-01-30 - Written - Bovy (NYU)
    """
    key= [re.split(r'.fit',o.key)[0] for o in out]
    lenkey= [len(k) for k in key]
    #Prepare columns
    cols= []
    colkey= pyfits.Column(name='key',format=str(max(lenkey))+'A',array=key)
    cols.append(colkey)
    colqso= pyfits.Column(name='qsologlike',format='E',
                          array=[o.qsologlike for o in out])
    cols.append(colqso)
    colstar= pyfits.Column(name='starloglike',format='E',
                           array=[o.starloglike for o in out])
    cols.append(colstar)
    colrrlyrae= pyfits.Column(name='rrlyraeloglike',format='E',
                              array=[o.rrlyraeloglike for o in out])
    cols.append(colrrlyrae)
    #Save
    columns= pyfits.ColDefs(cols)  
    tbhdu= pyfits.new_table(columns)
    tbhdu.writeto(filename)

    return None
Example #40
def change_column_names(filename, old_colnames, new_colnames):
    """
	Change the name of a column.
	Pyfits does not really provide a convenient function to do this, so I'll have to 
	create a new table based on the old table, just use a different column name.
	"""
    os.system('mv %s %s.copy' % (filename, filename))
    c = pyfits.open(filename + ".copy")
    tbhdu = c[1]
    ncol = len(tbhdu.data.columns)
    newcols = []
    for i in range(ncol):
        colname = tbhdu.data.columns[i].name
        colfmt = tbhdu.data.formats[i]
        colarr = tbhdu.data.field(colname)
        for j in range(len(old_colnames)):
            if tbhdu.data.columns[i].name == old_colnames[j]:
                colname = new_colnames[j]
                break
                #print colname
        newcols += [pyfits.Column(name=colname, format=colfmt, array=colarr)]
    newcols = pyfits.ColDefs(newcols)
    #print newcols
    newhdu = pyfits.new_table(newcols)
    newhdu.writeto(filename)
    c.close()
    os.system('rm %s.copy' % filename)
Example #41
def mwrfits(filename,data,hdu=1,colnames=None,keys=None):
    """Write columns to a fits file in a table extension.

    Parameters
    ----------
    filename : str
      The fits file name
    data : list of 1D arrays
      A list of 1D arrays to write in the table
    hdu : int, optional
      The header where to write the data. Default: 1
    colnames : list of str
      The column names
    keys : dict-like
      A dictionary with keywords to write in the header
    """
    # Check the inputs
    if colnames is not None:
        if len(colnames) != len(data):
            raise ValueError("colnames and data must the same length")
    else:
        colnames = ['']*len(data)
    cols=[]
    for line in xrange(len(data)):
        cols.append(pf.Column(name=colnames[line],
                               format=getformat(data[line]),
                               array=data[line]))
    tbhdu = pf.new_table(cols)
    if type(keys) is dict:
        for k,v in keys.items():
            tbhdu.header.update(k,v)
    # write the file
    tbhdu.writeto(filename,clobber=True)
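
A minimal usage sketch, assuming pf is bound to pyfits and getformat is the
module's own format helper (arrays and keywords are made up):

import numpy as np

ra = np.array([10.68, 83.82])
dec = np.array([41.27, -5.39])
mwrfits('radec.fits', [ra, dec], colnames=['RA', 'DEC'],
        keys={'ORIGIN': 'demo'})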
Example #42
0
def write_cl(filename, cl, dtype=np.float64):
    """Writes Cl into an healpix file, as IDL cl2fits.

    Parameters
    ----------
    filename : str
      the fits file name
    cl : array or list of arrays
      the cl array(s) to write to file; a bare array is written as a single
      TEMPERATURE column, a list fills up to the six named columns in order
    """
    # check the dtype and convert it
    fitsformat = getformat(dtype)
    column_names = ['TEMPERATURE','GRADIENT','CURL','G-T','C-T','C-G']
    if isinstance(cl, list):
        cols = [pf.Column(name=column_name,
                               format='%s'%fitsformat,
                               array=column_cl) for column_name, column_cl in zip(column_names[:len(cl)], cl)]
    else: # we write only one TT
        cols = [pf.Column(name='TEMPERATURE',
                               format='%s'%fitsformat,
                               array=cl)]
            
    tbhdu = pf.new_table(cols)
    # add needed keywords
    tbhdu.header.update('CREATOR','healpy')
    tbhdu.writeto(filename,clobber=True)
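
Usage sketch with made-up spectra: a bare array becomes a single TEMPERATURE
column, a list fills the named columns in order:

import numpy as np

cl_tt = np.zeros(1025)
write_cl('cl_tt.fits', cl_tt)                    # TT only
write_cl('cl_teb.fits', [cl_tt, cl_tt, cl_tt])   # TEMPERATURE, GRADIENT, CURL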
Example #43
0
def export_dpc_balm_to_fits(objprefix, fitsfname):
    import pyfits
    import piolib

    ix = piolib.ReadVECTObject(objprefix + '_idx', "PIOINT", "")
    re = piolib.ReadVECTObject(objprefix + '_re', "PIOFLOAT", "")
    im = piolib.ReadVECTObject(objprefix + '_im', "PIOFLOAT", "")

    # the DPC index encodes (l, m) as ix = l*l + l + m + 1; invert it
    l = np.floor(np.sqrt(ix - 1))
    m = ix - l * l - l - 1

    lmax = np.int(np.max(l))
    mmax = np.int(np.max(m))

    hdu = pyfits.new_table([
        pyfits.Column(name='INDEX', format=r'1J', array=ix),
        pyfits.Column(name='REAL', format=r'1D', array=re),
        pyfits.Column(name='IMAG', format=r'1D', array=im)
    ])

    hdu.header.update('MAX-LPOL', lmax, "Maximum L multipole order")
    hdu.header.update('MAX-MPOL', mmax, "Maximum M multipole degree")
    hdu.header.update('SOURCE', objprefix)
    hdu.name = 'ALM'

    hdu.writeto(fitsfname)
Example #44
0
def beam_to_fits(filename, beam):
    import pyfits
    cols = pyfits.ColDefs([
        pyfits.Column(name='beam', format='D16.8', array=beam)], tbtype='TableHDU')
    tab = pyfits.new_table(cols, tbtype='TableHDU')
    hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tab])
    hdulist.writeto(filename)
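
Usage sketch (tbtype='TableHDU' makes this an ASCII rather than binary table
extension; the beam array here is made up):

import numpy as np

beam_to_fits('beam.fits', np.linspace(1.0, 0.0, 501))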
Example #45
0
def sort_by_column(tbhdu,fieldname):
    """
    Sort a FITS table HDU by its "fieldname" column in increasing order.
    
    Inputs:
     - tbhdu: FITS table HDU
     - fieldname <str> : field name of the column to sort

    Output:
     - new tbhdu with data sorted according to 'fieldname' column
    
    """
    from operator import itemgetter

    coldefs = tbhdu.columns
    tbdata = tbhdu.data
    index = tbdata.names.index(fieldname)

    # sort the rows on the chosen column, then transpose so that
    # sorted_data[i] is the full (sorted) array for column i
    sorted_data = np.transpose(sorted(tbdata, key=itemgetter(index)))

    cols = []
    for i in xrange(len(coldefs.names)):
        cols.append(pyfits.Column(name=coldefs[i].name,
                                  format=coldefs[i].format,
                                  array=sorted_data[i]))
    coldefs = pyfits.ColDefs(cols)

    return pyfits.new_table(coldefs)
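
Usage sketch with a hypothetical catalog sorted on its MAG column:

tbhdu = pyfits.open('catalog.fits')[1]
sorted_hdu = sort_by_column(tbhdu, 'MAG')
sorted_hdu.writeto('catalog_sorted.fits')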
Example #46
0
def convert_to_pogson(p):

    import pyfits, scipy

    cols = []
    for col in p.columns:
        if col.name[0:7] == 'psfFlux':
            # -2.5*log10(flux) + 22.5: flux to Pogson/AB magnitude (the 22.5
            # zero point suggests the fluxes are in nanomaggies)
            array = -2.5 * scipy.log10(p.data.field(col.name)) + 22.5
            cols.append(
                pyfits.Column(name=col.name.replace('psfFlux', 'psfPog'),
                              format=col.format,
                              array=array))
        cols.append(col)

    hdu = pyfits.PrimaryHDU()
    hdulist = pyfits.HDUList([hdu])
    print cols
    tbhu = pyfits.new_table(cols)
    #hdulist.append(tbhu)
    #hdulist[1].header.update('EXTNAME','STDTAB')
    #outcat = '/tmp/test' #path + 'PHOTOMETRY/' + type + '.cat'
    #os.system('rm ' + f + '.tab')
    #hdulist.writeto(f + '.tab')

    return tbhu
Example #47
0
def writeFits(taustar, transmission, filename):
    tauCol = pyfits.Column(name='tau', format='E', array=taustar)
    transmissionCol = pyfits.Column(name='transmission', format='E',
                                    array=transmission)
    columns = pyfits.ColDefs([tauCol, transmissionCol])
    tableHDU = pyfits.new_table(columns)
    tableHDU.writeto(filename)
Example #48
0
    def convert(self, inName, outName):
        """Convert input data to the format to be processed by astrometry.net"""
        if os.path.exists(outName):
            print "Output file %s exists; not clobbering" % outName
            return
        inFile = pyfits.open(inName)
        inData = inFile[1].data
        print "Read %d rows from %s" % (len(inData), inName)

        # Filter the data and get the columns we want
        columns = self.filter(inData)

        if not "ra" in self.schema or not "dec" in self.schema:
            raise RuntimeError("Don't have 'ra' and 'dec' columns in schema")

        size = None
        for col in self.schema:
            if not col in columns:
                raise RuntimeError("Schema column %s was not present after filtering" % col)
            if size is None:
                size = len(columns[col])
            elif len(columns[col]) != size:
                raise RuntimeError("Size mismatch for column %s: %d vs %d" % (col, len(columns[col], size)))

        # Write it all out
        schema = pyfits.ColDefs([pyfits.Column(name=col, format=self.schema[col]) for col in self.schema])
        outHdu = pyfits.new_table(schema, nrows=size)
        outData = outHdu.data

        for col in self.schema:
            outData.field(col)[:] = columns[col]

        outHdu.writeto(outName, clobber=True)
        print "Wrote %d rows as %s" % (size, outName)
        inFile.close()
Example #50
0
def write_fits(magpat, fits_output_file):
    """Save a magnification pattern to a FITS file.

    The pattern itself is saved in the primary HDU of the FITS file.
    The coordinates of the source plane rectangle occupied by the
    pattern are stored in the header fields

        MAGPATX0, MAGPATY0, MAGPATX1, MAGPATY1

    The lens list is stored in a binary table HDU named "LENSES".

    Parameters:

        magpat           magnification pattern to save
        fits_output_file
                         file name of the output file
    """
    img_hdu = pyfits.PrimaryHDU(magpat)
    region = magpat.region
    img_hdu.header.update("ctype1", " ")
    img_hdu.header.update("crpix1", 0.5)
    img_hdu.header.update("crval1", region.x)
    img_hdu.header.update("cdelt1", region.width / magpat.params.xpixels)
    img_hdu.header.update("ctype2", " ")
    img_hdu.header.update("crpix2", 0.5)
    img_hdu.header.update("crval2", region.y)
    img_hdu.header.update("cdelt2", region.height / magpat.params.ypixels)
    for s in ["x0", "y0", "x1", "y1"]:
        img_hdu.header.update("magpat" + s, getattr(region, s))
    lens_hdu = pyfits.new_table(magpat.lenses)
    lens_hdu.name = "lenses"
    pyfits.HDUList([img_hdu, lens_hdu]).writeto(fits_output_file, clobber=True)
    utils.logger.info("Wrote magnification pattern to %s", fits_output_file)
Example #51
0
def run_treecorr(x, y, g1, g2):
    """Helper routine to take outputs of GalSim shear grid routine, and run treecorr on it."""
    import pyfits
    import os
    import treecorr
    # Use fits binary table for faster I/O.
    assert x.shape == y.shape
    assert x.shape == g1.shape
    assert x.shape == g2.shape
    x_col = pyfits.Column(name='x', format='1D', array=x.flatten())
    y_col = pyfits.Column(name='y', format='1D', array=y.flatten())
    g1_col = pyfits.Column(name='g1', format='1D', array=g1.flatten())
    g2_col = pyfits.Column(name='g2', format='1D', array=g2.flatten())
    cols = pyfits.ColDefs([x_col, y_col, g1_col, g2_col])
    table = pyfits.new_table(cols)
    phdu = pyfits.PrimaryHDU()
    hdus = pyfits.HDUList([phdu, table])
    hdus.writeto('temp.fits', clobber=True)
    # Define the treecorr catalog object.
    cat = treecorr.Catalog('temp.fits',
                           x_units='degrees',
                           y_units='degrees',
                           x_col='x',
                           y_col='y',
                           g1_col='g1',
                           g2_col='g2')
    # Define the corrfunc object.  (min_sep and max_sep are not defined in
    # this function; the original script presumably sets them as globals.)
    gg = treecorr.GGCorrelation(min_sep=min_sep,
                                max_sep=max_sep,
                                bin_size=0.1,
                                sep_units='degrees')
    # Actually calculate the correlation function.
    gg.process(cat)
    os.remove('temp.fits')
    return gg
Example #52
0
def merge_tables(table1, table2, newtabname, mode='left'):
    """	usage: merge_tables(table1, table2, newtabname, mode='left')
	table1, table2 are two fits tables (Ftable instances) to be merged.
	mode:
	'left': only merge columns that are in table1; ignore other columns in table2.
	'right': only merge columns that are in table2; ignore other columns in table1.
	"""
    columns = []
    if mode == 'left':
        for i in range(len(table1.Columns)):
            col = table1.Columns[i]
            if col in table2.Columns:  # if col is also in table2
                a1 = table1.__getitem__(col)
                a2 = table2.__getitem__(col)
                a_all = concatenate([a1, a2])
                fmt = table1.d.formats[i]
                columns += [pyfits.Column(name=col, format=fmt, array=a_all)]
    elif mode == 'right':
        for i in range(len(table2.Columns)):
            col = table2.Columns[i]
            if col in table1.Columns:  # if col is also in table1
                a2 = table2.__getitem__(col)
                a1 = table1.__getitem__(col)
                a_all = concatenate([a1, a2])
                fmt = table2.d.formats[i]
                columns += [pyfits.Column(name=col, format=fmt, array=a_all)]
    cols = pyfits.ColDefs(columns)
    tbhdu = pyfits.new_table(cols)
    tbhdu.writeto(newtabname)
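
Usage sketch; Ftable is the caller's own FITS-table wrapper, so the constructor
calls below are only a guess at its interface:

t_north = Ftable('north.fits')   # hypothetical files
t_south = Ftable('south.fits')
merge_tables(t_north, t_south, 'combined.fits', mode='left')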
Example #53
0
def write_cmd_file(near_targ, target):
  '''
  Takes the record array of sources near the target and the record array of the target, and produces a FITS table.
  '''

  # Columns to be in the fits table: these data are for the nearby sources
  c1 = pyfits.Column(name='HSTID', format='20A', array=near_targ['hstid'])
  c2 = pyfits.Column(name='RA', format='F', array=near_targ['degra'])
  c3 = pyfits.Column(name='DEC', format='F', array=near_targ['degdec'])
  c4 = pyfits.Column(name='V', format='F', array=near_targ['v'])
  c5 = pyfits.Column(name='VERR', format='F', array=near_targ['verr'])
  c6 = pyfits.Column(name='BV', format='F', array=near_targ['bvcol'])
  c7 = pyfits.Column(name='BVERR', format='F', array=near_targ['bvcolerr'])
  c8 = pyfits.Column(name='VI', format='F', array=near_targ['vicol'])
  c9 = pyfits.Column(name='VIERR', format='F', array=near_targ['vicolerr'])

  # Make table
  table_hdu = pyfits.new_table([c1, c2, c3, c4, c5, c6, c7, c8, c9])


  # Update the header with the target's info
  table_hdu.header.update(key='HSTID', value=target['hstid'])
  table_hdu.header.update(key='LBTID', value=target['lbtid'])
  table_hdu.header.update(key='RA', value=str(target['ra']))
  table_hdu.header.update(key='DEC', value=str(target['dec']))     

  # Table data cannot be the Primary HDU, so we make an empty Primary HDU
  phdu = pyfits.PrimaryHDU()

  # Zeroth extension is empty, first extension contains the table  
  hdulist = pyfits.HDUList([phdu, table_hdu])
  hdulist.writeto(target['lbtid']+'.fits')
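
Usage sketch with a tiny made-up record array for the nearby sources and a
dict-like target row (all field values are placeholders):

import numpy as np

near = np.zeros(2, dtype=[('hstid', 'S20'), ('degra', 'f4'), ('degdec', 'f4'),
                          ('v', 'f4'), ('verr', 'f4'), ('bvcol', 'f4'),
                          ('bvcolerr', 'f4'), ('vicol', 'f4'), ('vicolerr', 'f4')])
target = {'hstid': 'HST001', 'lbtid': 'LBT001', 'ra': 210.0, 'dec': 54.3}
write_cmd_file(near, target)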
Example #54
0
def makeHDU(config,mag_1,mag_err_1,mag_2,mag_err_2,lon,lat,mc_source_id):
    """
    Create a catalog fits file object based on input data.

    ADW: This should be combined with the write_membership function of loglike.
    """

    if config['catalog']['coordsys'].lower() == 'cel' \
       and config['coords']['coordsys'].lower() == 'gal':
        lon, lat = ugali.utils.projector.gal2cel(lon, lat)
    elif config['catalog']['coordsys'].lower() == 'gal' \
       and config['coords']['coordsys'].lower() == 'cel':
        lon, lat = ugali.utils.projector.cel2gal(lon, lat)

    columns = [
        pyfits.Column(name=config['catalog']['objid_field'],
                      format = 'D',array = np.arange(len(lon))),
        pyfits.Column(name=config['catalog']['lon_field'],
                      format = 'D',array = lon),
        pyfits.Column(name = config['catalog']['lat_field'],          
                      format = 'D',array = lat), 
        pyfits.Column(name = config['catalog']['mag_1_field'],        
                      format = 'E',array = mag_1),
        pyfits.Column(name = config['catalog']['mag_err_1_field'],    
                      format = 'E',array = mag_err_1),
        pyfits.Column(name = config['catalog']['mag_2_field'],        
                      format = 'E',array = mag_2),
        pyfits.Column(name = config['catalog']['mag_err_2_field'],    
                      format = 'E',array = mag_err_2),
        pyfits.Column(name = config['catalog']['mc_source_id_field'], 
                      format = 'I',array = mc_source_id),
    ]

    hdu = pyfits.new_table(columns)
    return hdu
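
A usage sketch with a minimal config dict carrying just the keys the function
reads (all names and values are made up):

import numpy as np

config = {
    'catalog': {'coordsys': 'cel', 'objid_field': 'OBJID',
                'lon_field': 'RA', 'lat_field': 'DEC',
                'mag_1_field': 'MAG_G', 'mag_err_1_field': 'MAGERR_G',
                'mag_2_field': 'MAG_R', 'mag_err_2_field': 'MAGERR_R',
                'mc_source_id_field': 'MC_SOURCE_ID'},
    'coords': {'coordsys': 'cel'},
}
n = 100
hdu = makeHDU(config, np.zeros(n), np.zeros(n), np.zeros(n), np.zeros(n),
              np.zeros(n), np.zeros(n), np.zeros(n, dtype=int))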
Example #55
0
def addPDvals(filename,pdfile,extnam,prefix,tstart):
    if (os.path.getsize(filename) < 35000000):
        print "File %s appears to be bogus." % filename
        return 0
    hdulist = pf.open(filename, mode='update')
    tmdata = numpy.zeros((3000), dtype=numpy.float64)
    pddata = numpy.zeros((3000), dtype=numpy.float64)
#    fpd = open("%s/pd-values-for-seq-%d-exp-%d" % (cdir,seq,i),"r");
    fpd = open(pdfile,"r");
    ival = 0
    for line in fpd:
        tokens = str.split(line)
        pdval = float(tokens[1])
        pdtime = float(tokens[0])
        print "time = %10.4e , pdval = %10.4e" % (pdtime,pdval)
        pddata[ival] = pdval
        tmdata[ival] = pdtime
        ival = ival + 1
    fpd.close()
#    hdulist.append(pf.BinTableHDU(data=(tmdata,pddata)))
    c1 = pf.Column(name="%s_MEAS_TIMES" % prefix, format='D', array=tmdata)
    c2 = pf.Column(name="%s_A_CURRENT" % prefix, format="D", array=pddata)
    table_hdu = pf.new_table([c1, c2])
    hdulist.append(table_hdu)

    # after this loop, hdr is the header of the last HDU
    # (the photodiode table just appended)
    for seg in hdulist :
        hdr=seg.header

    hdr.update("EXTNAME", extnam)
    hdr.update("TSTART", tstart)

    hdulist.close()
Example #56
0
    def write(self, file_name) :
        """Write stored data to file.
        
        Take all the data stored in the Writer (from added DataBlocks) and
        write it to a fits file with the passed file name.
        """

        # Add the data
        Col = pyfits.Column(name='DATA', format=self.data_format, 
                            array=self.data)
        columns = [Col,]
        
        # Add all the other stored fields.
        for field_name in self.field.iterkeys() :
            Col = pyfits.Column(name=field_name,
                                format=self.formats[field_name],
                                array=self.field[field_name])
            columns.append(Col)
        coldefs = pyfits.ColDefs(columns)
        # Create fits header data units, one for the table and the mandatory
        # primary.
        tbhdu = pyfits.new_table(coldefs)
        prihdu = pyfits.PrimaryHDU()
        # Add the write history.
        fname_abbr = ku.abbreviate_file_path(file_name)
        self.history.add('Written to file.', ('File name: ' + fname_abbr,))
        # Add the history to the header.
        bf.write_history_header(prihdu.header, self.history)

        # Combine the HDUs and write to file.
        hdulist = pyfits.HDUList([prihdu, tbhdu])
        hdulist.writeto(file_name, clobber=True)
        if self.feedback > 0 :
            print 'Wrote data to file: ' + fname_abbr