Example #1
def fetch_epoch(epoch, kind, verbose=False):
    """
    Return the single epoch and the matched coadded data.
    """
    assert kind in ['stars', 'gals']
    ddir = os.environ['sgdata']

    # single epoch
    f = pf.open(ddir + 's82single_%s_%d.fits' % (kind, epoch))
    s = f[1].data
    f.close()
    N = s.field(0).size

    try:
        f = pf.open(ddir + 's82coadd_%s_%d.fits' % (kind, epoch))
        c = f[1].data
        f.close()

    except IOError:
        print('Matched fits for coadd does not exist, building...')

        # master
        f = pf.open(ddir + 's82coadd30k_%s_rfadely.fit' % kind)
        c = f[1].data
        f.close()

        # find the corresponding coadds
        inds = np.zeros(N, dtype=int)
        ind = 0
        for i in range(N):
            if verbose and i % 200 == 0:
                print('searching', i)
            coadd_objid = s.field('coadd_objid')[i]
            search = True
            while search:
                if c.field('objid')[ind] == coadd_objid:
                    inds[i] = ind
                    search = False
                ind += 1

        c = c[inds]
        if False:
            # paranoid check
            for i in range(N):
                st = '%d %d' % (c[i].field('objid'), s[i].field('coadd_objid'))
                assert c[i].field('objid') == s[i].field('coadd_objid'), st

        dt = {'E': np.float32, 'K': np.int64, 'D': np.float64, 'I': np.int16}
        cols = []
        for i in range(len(s[0]) - 1):
            n = s._coldefs.names[i]
            f = s._coldefs.formats[i]
            cols.append(pf.Column(name=n, format=f, array=c.field(n).astype(dt[f])))

        tbhdu = pf.new_table(pf.ColDefs(cols))
        tbhdu.writeto(ddir + 's82coadd_%s_%d.fits' % (kind, epoch), clobber=True)
        c = tbhdu.data
    
    return s, c
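
Note: pf.new_table(...) and the clobber keyword used above belong to the legacy
pyfits API and were removed from astropy.io.fits. A minimal sketch of the
equivalent table-building step under modern astropy, reusing the cols list
built above:

    from astropy.io import fits

    # BinTableHDU.from_columns replaces pf.new_table(pf.ColDefs(cols))
    tbhdu = fits.BinTableHDU.from_columns(cols)
    # 'overwrite' replaces the removed 'clobber' keyword
    tbhdu.writeto(ddir + 's82coadd_%s_%d.fits' % (kind, epoch), overwrite=True)
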
Example #2
def add_angsep_column(filename, ra=0., dec=0.):
  '''Add an angular separation column between the photon direction and (ra,dec).
  ___arguments___:
  filename : name of your FT1 file
  ra       : Right Ascension (deg)
  dec      : Declination (deg)
  '''
  ft1 = pyfits.open(filename)
  RA  = numpy.asarray(ft1['EVENTS'].data.field('RA'))
  DEC = numpy.asarray(ft1['EVENTS'].data.field('DEC'))

  angsep=[]
  for it in range(len(RA)):
    angsep.append(angular_separation(ra,dec,RA[it],DEC[it]))

  colname = 'ANGSEP'
  # clobber old values
  try:
    ft1['EVENTS'].data.field(colname)[:] = angsep
    print('Clobbered old ANGSEP column.')
  except KeyError:
    cols = ft1['EVENTS'].columns
    newcols = cols.add_col(pyfits.Column(name=colname, format='D', array=angsep))

    table = pyfits.new_table(newcols, header=ft1['EVENTS'].header)
    table.name = 'EVENTS'
    ft1[1] = table

  ft1.writeto(filename, clobber=True)
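
For reference, the same append-a-column step can be written against the modern
astropy.io.fits API; a sketch, assuming the same FT1 layout and the angsep list
computed above:

    from astropy.io import fits
    import numpy

    ft1 = fits.open(filename)
    events = ft1['EVENTS']
    newcol = fits.Column(name='ANGSEP', format='D', array=numpy.asarray(angsep))
    table = fits.BinTableHDU.from_columns(events.columns + newcol,
                                          header=events.header)
    table.name = 'EVENTS'
    ft1[1] = table
    ft1.writeto(filename, overwrite=True)
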
Example #3
def sex2fits(c, fitsname, booleancols=[]):
    """	usage: sex2fits(c, fitsname, booleancols=[])
	c -- input sextutils.sextractor catalog instance
	fitsname -- output file name of the binary FITS table
	booleancols -- column names that should be converted to boolean values"""
    fitscols = []
    # construct all the columns
    for i in range(len(c._d)):
        colname = c._colnames[i]
        coltype = c._type[colname]
        colfmt = colformats[coltype]
        if coltype == 's':
            slen = c._fmt[colname][1:-1]  # length of string
            colfmt = slen + colfmt
        colarray = getattr(c, colname)
        # catch the Boolean array of 0 or 1
        if colname in booleancols:
            colfmt = 'L'
            colarray = where(colarray == 0, False, True)  # convert to boolean array
        col = pyfits.Column(name=colname, format=colfmt, array=colarray)
        fitscols += [col]
    # create table header unit
    cols = pyfits.ColDefs(fitscols)
    tbhdu = pyfits.new_table(cols)
    hdu = pyfits.PrimaryHDU(array([]))  # create a primary HDU with an empty array
    thdulist = pyfits.HDUList([hdu, tbhdu])
    thdulist.writeto(fitsname)
    return 0
Example #4
    def testCreateBins(self):

        cat = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs([pyfits.Column(name = key, format = 'E', array = vals) \
                                                                        for key, vals in self.catalog.items()])))
        cat.hdu.header.update('EXTNAME', 'OBJECTS')

        bins = createBins(cat,
                          [[lambda x: x['a'] == 0, lambda x: x['a'] == 1],
                           [lambda x: x['c'] == 1, lambda x: x['c'] == 0]])

        self.assertEqual(len(bins), 4)

        self.assertTrue((0, 0) in bins.keys())
        self.assertTrue(
            np.logical_and(bins[(0, 0)]['a'] == 0,
                           bins[(0, 0)]['c'] == 1).all())

        self.assertTrue((1, 0) in bins.keys())
        self.assertTrue(
            np.logical_and(bins[(1, 0)]['a'] == 1,
                           bins[(1, 0)]['c'] == 1).all())

        self.assertTrue((0, 1) in bins.keys())
        self.assertTrue(
            np.logical_and(bins[(0, 1)]['a'] == 0,
                           bins[(0, 1)]['c'] == 0).all())

        self.assertTrue((1, 1) in bins.keys())
        self.assertTrue(
            np.logical_and(bins[(1, 1)]['a'] == 1,
                           bins[(1, 1)]['c'] == 0).all())
Example #5
def ccdnum2(infile, outfile=None, force=True):
    import pyfits

    f = pyfits.open(infile)
    names = np.char.upper(f[1].columns.names).tolist()

    if 'CCDNUM' in names:
        print('CCDNUM column already found; skipping...')
        return

    if outfile is None: outfile = infile
    if os.path.exists(outfile) and not force:
        found(outfile)
        return

    if 'FILENAME' not in names:
        msg = 'FILENAME column not found.'
        raise Exception(msg)

    d = f[1].data
    ccdnum = np.array(
        [a[2].strip('c') for a in np.char.split(d['filename'], '_')],
        dtype=int)
    idx = names.index('EXPNUM')
    coldefs = pyfits.ColDefs(
        [pyfits.Column(name='CCDNUM', format='I', array=ccdnum)])
    hdu = pyfits.BinTableHDU.from_columns(d.columns[:idx + 1] + coldefs +
                                          d.columns[idx + 1:])

    print("Writing %s..." % outfile)
    hdu.writeto(outfile, clobber=force)
Example #6
def make_db_table(sname='final'):
    # table for GZ2 site database
    f = data_path + 'gz2sample_%s_abs_regions_counts_wvt.fits' % sname
    p = pyfits.open(f)
    d = p['data'].data
    n = len(d)
    oldcols = p['data'].columns
    cols = []
    for c in oldcols:
        name = c.name.upper()
        if name in [
                'OBJID', 'RA', 'DEC', 'PETROR90_R', 'REGION',
                'REDSHIFT_SIMPLE_BIN', 'WVT_BIN'
        ]:
            name = name.replace('PETROR90_R', 'SIZE')
            name = name.replace('_SIMPLE', '')
            name = name.replace('WVT_', 'MAGSIZE_')
            cols.append(
                pyfits.Column(name=name,
                              format=c.format,
                              array=d.field(c.name)))
    #cols.append(pyfits.Column(name='CLASSCOUNT', format='I',
    #                               array=N.zeros(n)))
    #cols.append(pyfits.Column(name='COMPCOUNT', format='I',
    #                               array=N.zeros(n)))
    tbhdu = pyfits.new_table(cols)
    tbhdu.name = 'data'
    outfile = data_path + 'gz2sample_%s_db.fits' % sname
    file_exists = os.path.isfile(outfile)
    if file_exists:
        os.remove(outfile)
    tbhdu.writeto(outfile)
    csvfile = outfile.replace('.fits', '.csv')
    fits2csv(outfile, csvfile)
    os.system('gzip %s &' % csvfile)
Example #7
    def setColF(self, name, format, array):
        self.name = name
        self.format = format
        self.array = array
        a1 = numpy.array([self.array], dtype=numpy.float32)
        self.col1 = pyfits.Column(name=self.name, format=self.format, array=a1)
        return self.col1
Example #8
def add_columns(filename, colnames, newarrays, formats):
    # append new columns to an existing table
    oldfile = filename + ".OLD"
    # os.system never raises on failure, so the old try/except was dead code;
    # remove any stale backup first, then move the file aside
    if os.path.exists(oldfile):
        os.remove(oldfile)
    os.rename(filename, oldfile)
    # write the new columns to a temporary table
    newcols = []
    for i in range(len(colnames)):
        newcols += [
            pyfits.Column(name=colnames[i],
                          format=formats[i],
                          array=newarrays[i])
        ]
    newcols = pyfits.ColDefs(newcols)
    tbhdu = pyfits.new_table(newcols)
    if os.path.exists('temp.fits'):
        os.remove('temp.fits')
    tbhdu.writeto('temp.fits')
    # Now read in the old table and merge
    h1 = pyfits.open(oldfile)
    h2 = pyfits.open('temp.fits')
    h = h1[1].columns + h2[1].columns  # merge the columns
    newhdu = pyfits.new_table(h)
    newhdu.writeto(filename)
    return 0
Example #9
def write_fits_file(data, in_header, out_header, name, verbose=False):
    """Write fits file.
    """

    ncol = len(out_header)

    if verbose:
        print('Guessing column type info, from first-row entries:')

    cols = []
    for i in range(ncol):

        entry = data[in_header[i]][0]
        if isinstance(entry, float):
            form = 'E'
        elif isinstance(entry, int):
            form = 'I'   # note: FITS 'I' is a 16-bit signed integer
        else:
            form = '100A'

        if verbose:
            print('  col {}: {} -> \'{}\''.format(i, entry, form))

        new_col = fits.Column(name=out_header[i],
                              format=form,
                              array=data[in_header[i]])
        cols.append(new_col)

    coldefs = fits.ColDefs(cols)
    hdu = fits.BinTableHDU.from_columns(coldefs)
    if verbose:
        print('Writing FITS file \'{}\''.format(name))
    hdu.writeto(name, clobber=True)
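
Guessing the format from the first-row entry is fragile: a Python int maps to
'I', which is only a 16-bit FITS integer, and anything non-numeric becomes
'100A'. A sketch of a dtype-based alternative (guess_fits_format is a
hypothetical helper, not part of the original code):

    def guess_fits_format(arr):
        """Map a numpy array's dtype to a FITS binary-table format code."""
        kind = arr.dtype.kind
        if kind == 'f':
            return 'D'                                # 64-bit float
        if kind in ('i', 'u'):
            return 'K'                                # 64-bit integer
        if kind == 'S':
            return '%dA' % arr.dtype.itemsize         # fixed-width byte string
        if kind == 'U':
            return '%dA' % (arr.dtype.itemsize // 4)  # unicode: 4 bytes per char
        raise ValueError('unsupported dtype: %s' % arr.dtype)
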
Example #10
def WriteFITSTable(file_name, data_array, fields=None):
    """
    Given a ``file_name`` and a ``data_array``, write the ``data_array`` to the ``file_name`` as a
    FITS file if there is an available module to do so (``pyfits`` or ``astropy.io.fits``).
    Otherwise, raise an error.

    If ``fields`` is not None, this will rearrange a NumPy formatted array to the field
    specification (must be either a list of field names, or a dict of the form ``'field_name':
    field_position/original_order_field_name``).  Note that if ``fields`` is a dict that does not
    describe every column below its maximum field number, columns not indicated by
    the dict will be moved around to fill in any gaps.  If you specify, say, columns 0, 1, and 3,
    you may be surprised by what is in column 2!

    .. note::
       At the moment, if your maximum column number in the ``fields`` dict is greater than the
       number of fields in the ``data_array``, an error will occur.
    """
    if not has_fits:
        raise ImportError(
            'FITS-type table requested, but no FITS handler found')
    data = _handleFields(data_array, fields)
    cols = [
        fits_handler.Column(name=data.dtype.names[i],
                            format=_coerceFitsFormat(data.dtype[i]),
                            array=data[data.dtype.names[i]])
        for i in range(len(data.dtype))
    ]
    table = fits_handler.new_table(cols)
    hdulist = fits_handler.HDUList([fits_handler.PrimaryHDU()])
    hdulist.append(table)
    hdulist.verify()
    hdulist.writeto(file_name)
Example #11
def write_fits_table(tbl, names, filename, formats=False):

    table = np.asarray(tbl)

    if table.ndim != 2: raise ValueError('I was expecting a 2D data table')

    nrows, ncols = table.shape

    if ncols != len(names):
        raise ValueError(
            'Number of supplied names does not match number of table columns')

    if not formats:
        if table.dtype.kind == 'i': formats = ['K'] * ncols
        elif table.dtype.kind == 'f': formats = ['D'] * ncols
        elif table.dtype.kind == 'S':
            # plain 'A' is a one-character field; size the format to the strings
            formats = ['%dA' % table.dtype.itemsize] * ncols
        else:
            raise ValueError(
                "Couldn't figure out what type of data this is - please provide column formats"
            )

    cols = [0] * ncols

    for i in range(ncols):
        cols[i] = pyfits.Column(name=names[i],
                                format=formats[i],
                                array=table[:, i])

    tbhdu = pyfits.new_table(cols)

    tbhdu.writeto(filename, clobber=True)
Example #12
def select_columns(tbhdu, *fieldnames):
    """
    Select particular columns from given table

    A new table with only the asked columns ('fieldnames')
    is output.

    Input:
     - tbhdu : Table HDU, e.g. pyfits.open('data.fit')[1]
     - fieldnames : str
        Names of the columns to be read from 'tbhdu'

    Output:
     -> (new) BinTableHDU, with just the selected fields

    ---
    """
    coldefs = tbhdu.columns
    tbdata = tbhdu.data
    inds = [tbdata.names.index(name.upper()) for name in fieldnames]
    cols = []
    for i in inds:
        cols.append(pyfits.Column(name=coldefs[i].name,
                                  format=coldefs[i].format,
                                  array=tbdata.field(i)))
    coldefs = pyfits.ColDefs(cols)

    return pyfits.new_table(coldefs)
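
A minimal usage sketch (the file and column names here are hypothetical):

    tbhdu = pyfits.open('data.fit')[1]
    # field names are upper-cased internally, so case does not matter here
    radec = select_columns(tbhdu, 'ra', 'dec')
    radec.writeto('radec.fit')
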
Example #13
def make_db_table2():
    # table for GZ2 site database which matches old table columns
    f = data_path + 'gz2sample_final_abs_regions_counts_wvt.fits'
    p = pyfits.open(f)
    d = p['data'].data
    n = len(d)
    oldcols = p['data'].columns
    cols = []
    i = 0
    colnames = [
        'objectId', 'run', 'rerun', 'camcol', 'field', 'obj', 'rightAscension',
        'declination', 'apparentMagnitude', 'petrosianRadius'
    ]
    for name in [
            'OBJID', 'RUN', 'RERUN', 'CAMCOL', 'FIELD', 'OBJ', 'RA', 'DEC',
            'PETROMAG_R', 'PETROR90_R'
    ]:
        for c in oldcols:
            # original GZ table has entries:
            if name == c.name:
                print(c.name, colnames[i])
                cols.append(
                    pyfits.Column(name=colnames[i],
                                  format=c.format,
                                  array=d.field(c.name)))
                i += 1
    print(cols)
    tbhdu = pyfits.new_table(cols)
    tbhdu.name = 'data'
    outfile = data_path + 'gz2sample_db2.fits'
    file_exists = os.path.isfile(outfile)
    if file_exists:
        os.remove(outfile)
    tbhdu.writeto(outfile)
Example #14
File: fitsfunc.py  Project: dhanson/healpy
def write_cl(filename, cl, dtype=npy.float32):
    """Writes Cl into an healpix file, as IDL cl2fits.

    Parameters
    ----------
    filename : str
      the fits file name
    cl : array
      the cl array to write to file, currently TT only
    """
    # check the dtype and convert it
    fitsformat = getformat(dtype)
    if isinstance(cl, list):
        raise NotImplementedError(
            'Currently it supports only temperature-only cls')
    else:  # we write only one TT
        cols = [
            pyf.Column(name='TEMPERATURE', format='%s' % fitsformat, array=cl)
        ]

    coldefs = pyf.ColDefs(cols)
    tbhdu = pyf.new_table(coldefs)
    # add needed keywords
    tbhdu.header.update('CREATOR', 'healpy')
    tbhdu.writeto(filename, clobber=True)
Example #15
def mwrfits(filename, data, hdu=1, colnames=None, keys=None):
    """Write columns to a fits file in a table extension.

    Parameters
    ----------
    filename : str
      The fits file name
    data : list of 1D arrays
      A list of 1D arrays to write in the table
    hdu : int, optional
      The header where to write the data. Default: 1
    colnames : list of str
      The column names
    keys : dict-like
      A dictionary with keywords to write in the header
    """
    # Check the inputs
    if colnames is not None:
        if len(colnames) != len(data):
            raise ValueError("colnames and data must the same length")
    else:
        colnames = [''] * len(data)
    cols = []
    for line in range(len(data)):
        cols.append(
            pf.Column(name=colnames[line],
                      format=getformat(data[line]),
                      array=data[line]))
    tbhdu = pf.new_table(cols)
    if isinstance(keys, dict):
        for k, v in keys.items():
            tbhdu.header.update(k, v)
    # write the file
    tbhdu.writeto(filename, clobber=True)
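
A minimal usage sketch (the arrays and column names are hypothetical):

    import numpy as np

    energies = np.arange(10.)
    weights = np.ones(10)
    mwrfits('spectrum.fits', [energies, weights],
            colnames=['ENERGY', 'WEIGHT'], keys={'CREATOR': 'healpy'})
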
Example #16
def change_column_names(filename, old_colnames, new_colnames):
    """
	Change the name of a column.
	Pyfits does not really provide a convenient function to do this, so I'll have to 
	create a new table based on the old table, just use a different column name.
	"""
    os.system('mv %s %s.copy' % (filename, filename))
    c = pyfits.open(filename + ".copy")
    tbhdu = c[1]
    ncol = len(tbhdu.data.columns)
    newcols = []
    for i in range(ncol):
        colname = tbhdu.data.columns[i].name
        colfmt = tbhdu.data.formats[i]
        colarr = tbhdu.data.field(colname)
        for j in range(len(old_colnames)):
            if tbhdu.data.columns[i].name == old_colnames[j]:
                colname = new_colnames[j]
                break
                #print colname
        newcols += [pyfits.Column(name=colname, format=colfmt, array=colarr)]
    newcols = pyfits.ColDefs(newcols)
    #print newcols
    newhdu = pyfits.new_table(newcols)
    newhdu.writeto(filename)
    c.close()
    os.system('rm %s.copy' % filename)
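
Modern astropy makes the shell juggling above unnecessary: ColDefs provides a
change_name method. A sketch, with hypothetical column names and output file:

    from astropy.io import fits

    with fits.open(filename) as hdul:
        cols = hdul[1].columns
        cols.change_name('OLD_NAME', 'NEW_NAME')  # hypothetical names
        newhdu = fits.BinTableHDU.from_columns(cols, header=hdul[1].header)
        newhdu.writeto('renamed.fits', overwrite=True)
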
Example #17
def getFitsColumns(sourceSet, outputs):
    """ Return pyfits columns defined by the outputs structure.

    This is hopefully temporary, until schemas define everything
    we want to write. It uses the existing io.writeFits data structures.
    """

    nSource = len(sourceSet)
    nOut = len(outputs)

    # create the arrays and fill them
    arrays = {}
    for i in range(nOut):
        columnName = outputs[i]["label"]
        arrays[columnName] = numpy.zeros(nSource, dtype=outputs[i]["dtype"])

        for j, source in enumerate(sourceSet):
            getMethod = getattr(source, outputs[i]["get"])
            X = getMethod()
            if outputs[i]['angle']:
                X = X.asDegrees()
            arrays[columnName][j] = X

    # create the column defs
    columnDefs = []
    for i in range(nOut):
        columnName = outputs[i]["label"]
        columnDefs.append(
            pyfits.Column(name=columnName,
                          format=outputs[i]["fitstype"],
                          unit=outputs[i]['units'],
                          array=arrays[columnName]))
    return columnDefs
Example #18
    def test_variable_length_table_data(self):
        c1 = fits.Column(name='var', format='PJ()',
                         array=np.array([[45.0, 56], np.array([11, 12, 13])], 'O'))
        c2 = fits.Column(name='xyz', format='2I', array=[[11, 3], [12, 4]])
        tbhdu = fits.BinTableHDU.from_columns([c1, c2])
        tbhdu.writeto(self.temp('tmp.fits'), clobber=True, checksum=True)
        with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
            assert comparerecords(tbhdu.data, hdul[1].data)
            assert 'CHECKSUM' in hdul[0].header
            assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
            assert 'DATASUM' in hdul[0].header
            assert hdul[0].header['DATASUM'] == '0'
            assert 'CHECKSUM' in hdul[1].header
            assert hdul[1].header['CHECKSUM'] == 'YIGoaIEmZIEmaIEm'
            assert 'DATASUM' in hdul[1].header
            assert hdul[1].header['DATASUM'] == '1507485'
Example #19
def sort_by_column(tbhdu,fieldname):
    """
    Sort a FITS table HDU by "fieldname" column in increasing order. 
    
    Inputs:
     - tbhdu: FITS table HDU
     - fieldname <str> : field name of the column to sort

    Output:
     - new tbhdu with data sorted according to 'fieldname' column
    
    """
    from operator import itemgetter

    coldefs = tbhdu.columns
    tbdata = tbhdu.data
    index = tbdata.names.index(fieldname)
	
    sorted_data = np.transpose(sorted(tbdata, key=itemgetter(index)))

    cols = []
    for i in range(len(coldefs.names)):
        cols.append(pyfits.Column(name=coldefs[i].name,
                                  format=coldefs[i].format,
                                  array=sorted_data[i]))
    coldefs = pyfits.ColDefs(cols)

    return pyfits.new_table(coldefs)
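
With plain numpy the same sort can be expressed without rebuilding the columns
one by one; a sketch, assuming modern astropy.io.fits:

    import numpy as np
    from astropy.io import fits

    def sort_by_column_v2(tbhdu, fieldname):
        order = np.argsort(tbhdu.data[fieldname])  # increasing order
        return fits.BinTableHDU(data=tbhdu.data[order], header=tbhdu.header)
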
Example #20
    def writeData(self, name, format, data):
        self.name = name
        self.format = format
        self.data = data
        a2 = numpy.array([self.data], dtype=numpy.float32)
        self.col2 = pyfits.Column(name=self.name, format=self.format, array=a2)
        return self.col2
Example #21
    def test_add_del_columns2(self):
        hdulist = fits.open(self.data('tb.fits'))
        table = hdulist[1]
        assert table.data.dtype.names == ('c1', 'c2', 'c3', 'c4')
        assert table.columns.names == ['c1', 'c2', 'c3', 'c4']
        table.columns.del_col('c1')
        assert table.data.dtype.names == ('c2', 'c3', 'c4')
        assert table.columns.names == ['c2', 'c3', 'c4']

        table.columns.del_col('c3')
        assert table.data.dtype.names == ('c2', 'c4')
        assert table.columns.names == ['c2', 'c4']

        table.columns.add_col(fits.Column('foo', '3J'))
        assert table.data.dtype.names == ('c2', 'c4', 'foo')
        assert table.columns.names == ['c2', 'c4', 'foo']

        hdulist.writeto(self.temp('test.fits'), clobber=True)
        with ignore_warnings():
            # TODO: The warning raised by this test is actually indication of a
            # bug and should *not* be ignored. But as it is a known issue we
            # hide it for now.  See
            # https://github.com/spacetelescope/PyFITS/issues/44
            with fits.open(self.temp('test.fits')) as hdulist:
                table = hdulist[1]
                assert table.data.dtype.names == ('c2', 'c4', 'foo')
                assert table.columns.names == ['c2', 'c4', 'foo']
Example #22
def run_corr2(x,
              y,
              g1,
              g2,
              min_sep=0.1,
              max_sep=10.,
              nbins=8,
              temp_cat='temp.cat',
              params_file='corr2.params',
              m2_file_name='temp.m2',
              xy_units='degrees',
              sep_units='degrees',
              corr2_exec='corr2'):
    """Run Mike Jarvis' corr2 correlation function code using FITS files for the I/O.
    """
    import os
    import subprocess
    import tempfile
    import pyfits
    # Create temporary, unique files for I/O
    catfile = tempfile.mktemp(suffix=temp_cat)
    m2file = tempfile.mktemp(suffix=m2_file_name)
    # Use fits binary table for faster I/O. (Converting to/from strings is slow.)
    assert x.shape == y.shape
    assert x.shape == g1.shape
    assert x.shape == g2.shape
    x_col = pyfits.Column(name='x', format='1D', array=x.flatten())
    y_col = pyfits.Column(name='y', format='1D', array=y.flatten())
    g1_col = pyfits.Column(name='g1', format='1D', array=g1.flatten())
    g2_col = pyfits.Column(name='g2', format='1D', array=g2.flatten())
    cols = pyfits.ColDefs([x_col, y_col, g1_col, g2_col])
    table = pyfits.new_table(cols)
    phdu = pyfits.PrimaryHDU()
    hdus = pyfits.HDUList([phdu, table])
    hdus.writeto(catfile, clobber=True)
    subprocess.Popen([
        corr2_exec, params_file, 'file_name=' + str(catfile),
        'm2_file_name=' + str(m2file), 'file_type=FITS',
        'min_sep=%f' % min_sep,
        'max_sep=%f' % max_sep,
        'nbins=%d' % nbins, 'x_units=' + str(xy_units),
        'y_units=' + str(xy_units), 'sep_units=' + str(sep_units)
    ]).wait()
    results = np.loadtxt(m2file)
    os.remove(catfile)
    os.remove(m2file)
    return results
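
A hypothetical invocation, assuming the corr2 executable and a corr2.params
file are available:

    import numpy as np

    x, y = np.random.uniform(0., 1., size=(2, 1000))
    g1, g2 = 0.01 * np.random.randn(2, 1000)
    results = run_corr2(x, y, g1, g2, min_sep=0.05, max_sep=5., nbins=10)
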
Example #23
    def testTransferOffsets(self):

        transferFilters = 'SUBARU-9-2-W-J-V SUBARU-10_1-1-W-J-V MEGAPRIME-0-1-g'.split()
        transfer_orig_zps = [23.4, 25.3, 22.4]
        correspondingFilters = {'SUBARU-9-2-W-J-V' : 'SUBARU-10_2-1-W-J-V',
                                'SUBARU-10_1-1-W-J-V' : 'SUBARU-10_2-1-W-J-V',
                                'MEGAPRIME-0-1-g' : None}
        
        filternames = self.filternames + transferFilters
        orig_zps = self.orig_zps.tolist() + transfer_orig_zps

        zplist = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs([pyfits.Column(name = 'filter', format='20A', 
                                                                             array = filternames),
                                                               pyfits.Column(name = 'zeropoints', format='E', 
                                                                             array = orig_zps)])))

        saveSlrZP(cluster = 'testcluster', offsetFile = self.offsetFile, 
                  zplist = zplist, fluxtype = 'iso', myspec = 'custom',
                  photometry_db = self.db)

        for filter in filternames:

            correspondingFilter = filter
            if filter in correspondingFilters:                
                correspondingFilter = correspondingFilters[filter]

            if correspondingFilter is not None:

                slrmatch = None
                for slr in self.db.slr:
                    if correspondingFilter == slr.fitFilter:
                        slrmatch = slr
                        break
                self.assertTrue(slrmatch is not None)

                calibmatch = None
                for calib in self.db.calibrations:
                    if filter == calib.filter:
                        calibmatch = calib
                        break
                self.assertTrue(calibmatch is not None)

                self.assertEqual(calibmatch.cluster, 'testcluster')
                self.assertEqual(calibmatch.filter, filter)
                self.assertEqual(calibmatch.fluxtype, 'iso')
                self.assertEqual(calibmatch.myspec, 'custom')
                self.assertEqual(calibmatch.calibration, slrmatch)
Example #24
def makeCrossValCats(cat, nsets):

    if isinstance(cat, ldac.LDACCat):

        nentries = len(cat)
    else:
        nentries = len(cat[list(cat.keys())[0]])

    indices = np.random.permutation(nentries)

    slicepoints = np.linspace(0, nentries, nsets + 1).astype(int)  # slice indices must be integers

    cat_sets = []

    for cur_set_index in range(nsets):

        raw_testing_cat = {}
        raw_training_cat = {}

        for key in cat.keys():

            for i in range(nsets):

                if i == cur_set_index:
                    raw_testing_cat[key] = cat[key][
                        indices[slicepoints[i]:slicepoints[i + 1]]]

                else:
                    if key not in raw_training_cat:
                        raw_training_cat[key] = []
                    raw_training_cat[key].append(
                        cat[key][indices[slicepoints[i]:slicepoints[i + 1]]])


        testing_cat = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs([pyfits.Column(name = key, format = 'E', array = vals) \
                                                                        for key, vals in raw_testing_cat.items()])))
        testing_cat.hdu.header.update('EXTNAME', 'OBJECTS')


        training_cat = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs([pyfits.Column(name = key, format = 'E', array = np.hstack(vals)) \
                                                                         for key, vals in raw_training_cat.items()])))
        training_cat.hdu.header.update('EXTNAME', 'OBJECTS')

        cat_sets.append((training_cat, testing_cat))

    return cat_sets
Example #25
def make_outfile(fitsfile, outfile, phi, model, baddata):
    """
    Creates a fits file identical to the input fits file, save that it contains
    two extra columns: TRANSIT_MODL, the transit model fit to the data, and
    PHASE, the corresponding phase (converted from hours to days below).
    """
    newmodel = putInNans(baddata, model)
    newphi = putInNans(baddata, phi)
    col1 = pyfits.Column(name='TRANSIT_MODL',
                         format='E13.7',
                         unit='',
                         array=newmodel)
    col2 = pyfits.Column(name='PHASE',
                         format='E13.7',
                         unit='days',
                         array=newphi / 24.)  # phi is in hours by default
    cols = fitsfile[1].columns + col1 + col2
    fitsfile[1] = pyfits.new_table(cols, header=fitsfile[1].header)
    fitsfile.writeto(outfile)
Example #26
def write_extract_fits(ofile, ap_list, clobber=False):
    """Write out the extracted spectrum to a FITS table.  If the file already
        exists, this will not overwrite it.

        For each spectrum in ap_list, it will add another extension to the
        fits file.  Each extension will have the first column as wavelength,
        the second column as counts, and the third column as sigma on the
        counts.

        ofile: Output file to write

        ap_list:  List of extracted spectrum

        clobber: delete ofile if it already exists


    """
    # delete the file
    if os.path.isfile(ofile) and clobber:
        saltio.delete(ofile)

    # create the primary array
    hdu = pyfits.PrimaryHDU()
    hdulist = pyfits.HDUList([hdu])

    # create the columns and append one table extension per spectrum
    for ap in ap_list:
        fvar = abs(ap.lvar)**0.5
        # create the columns
        col1 = pyfits.Column(name='wavelength',
                             format='D',
                             unit='Angstroms',
                             array=ap.wave)
        col2 = pyfits.Column(name='counts',
                             format='D',
                             unit='Counts',
                             array=ap.ldata)
        col3 = pyfits.Column(name='counts_err', format='D', array=fvar)

        # add to the table
        tbhdu = pyfits.new_table([col1, col2, col3])
        hdulist.append(tbhdu)

    # write it out
    hdulist.writeto(ofile)
    return
Example #27
def writefits(x_gas, x_dmp, v_gas, v_dmp, nha, z, fits):
    Np = x_gas.shape[1]
    fmt3 = "{0:d}E".format(Np)
    fmt1 = "E"
    extheader = pf.Header()
    extheader["redshift"] = "{0:.3F}".format(z)

    coldefs = pf.ColDefs([
        pf.Column(name="x_gas", format=fmt3, array=x_gas),
        pf.Column(name="x_dmp", format=fmt3, array=x_dmp),
        pf.Column(name="v_gas", format=fmt3, array=v_gas),
        pf.Column(name="v_dmp", format=fmt3, array=v_dmp),
        pf.Column(name="nha", format=fmt1, array=nha)
    ])
    tbhdu = pf.BinTableHDU.from_columns(coldefs, header=extheader)
    tbhdu.writeto(fits, clobber=True)
    print "write to ", fits, Np, "objects"
Example #28
def genDefectFits(cameraName, source, targetDir):
    mapper = mapperMap[cameraName.lower()](root=".")
    camera = mapper.camera

    ccds = dict()
    for ccd in camera:
        ccdNum = ccd.getId()
        ccds[ccdNum] = ccd.getName()

    defects = dict()

    f = open(source, "r")
    for line in f:
        line = re.sub(r"#.*", "", line).strip()
        if len(line) == 0:
            continue
        ccd, x0, y0, width, height = re.split(r"\s+", line)
        ccd = int(ccd)
        if ccd not in ccds:
            raise RuntimeError("Unrecognised ccd: %d" % ccd)
        if ccd not in defects:
            defects[ccd] = list()
        defects[ccd].append(
            Defect(x0=int(x0),
                   y0=int(y0),
                   width=int(width),
                   height=int(height)))
    f.close()

    for ccd in ccds:
        # Make empty defect FITS file for CCDs with no defects
        if ccd not in defects:
            defects[ccd] = list()

        columns = list()
        for colName in Defect._fields:
            colData = numpy.array([d._asdict()[colName] for d in defects[ccd]])
            col = pyfits.Column(name=colName, format="I", array=colData)
            columns.append(col)

        cols = pyfits.ColDefs(columns)
        table = pyfits.BinTableHDU.from_columns(cols)

        table.header['NAME'] = ccd

        name = os.path.join(targetDir, "defects_%d.fits" % ccd)
        print("Writing %d defects from CCD %d (%s) to %s" %
              (table.header['NAXIS2'], ccd, ccds[ccd], name))
        if os.path.exists(name):
            if args.force:
                os.unlink(name)
            else:
                print("File %s already exists; use --force to overwrite" %
                      name,
                      file=sys.stderr)
                continue

        table.writeto(name)
Example #29
File: analysis.py  Project: irhamta/MLZ
def save_PDF(zfine,
             pdfs,
             Pars,
             path='',
             filebase='',
             num=-1,
             oob='no',
             var='',
             multiple='no',
             rank=0):
    """
    Saves photo-z PDFs
    """

    if path == '':
        path = Pars.path_results
    if not os.path.exists(path): os.makedirs(path)
    if filebase == '':
        filebase = Pars.finalfilename
    if num == -1:
        for j in range(100):
            if os.path.exists(path + filebase + '.' + str(j) +
                              '.mlz') and os.path.exists(path + filebase +
                                                         '.' + str(j) +
                                                         '.P.npy'):
                continue
            else:
                fileoutPDF = path + filebase + '.' + str(j) + '.P'
                if oob == 'yes':
                    fileoutPDF = path + filebase + '_oob' + var + '.' + str(
                        j) + '.P'
                break
    else:
        fileoutPDF = path + filebase + '.' + str(num) + '.P'
        if oob == 'yes':
            fileoutPDF = path + filebase + '_oob' + var + '.' + str(num) + '.P'

    if multiple == 'yes': fileoutPDF = fileoutPDF + '_' + str(rank)
    pdfs = concatenate((pdfs, [zfine]))
    if Pars.writefits == 'no':
        save(fileoutPDF, pdfs)
    else:
        head = pf.Header()
        head['N_TOT'] = len(pdfs) - 1
        head['DZ'] = zfine[1] - zfine[0]
        head['NPOINTS'] = len(zfine)
        head['COMMENT'] = 'The last row of the table are the redshift positions'
        head['COMMENT'] = 'This file was created using MLZ'
        head['HISTORY'] = 'Created on ' + datetime.datetime.now().strftime(
            "%Y-%m-%d  %H:%M")
        fmt = '%dE' % len(zfine)
        col0 = pf.Column(name='PDF values', format=fmt, array=pdfs)
        #table0 = pf.new_table(pf.ColDefs([col0]))
        table0 = pf.BinTableHDU.from_columns(pf.ColDefs([col0]))
        prihdu = pf.PrimaryHDU(header=head)
        hdulist = pf.HDUList([prihdu, table0])
        hdulist.writeto(fileoutPDF + '.fits', clobber=True)
Example #30
def DoSimRun(position_file,image_files,psf_files,bands,coadd_image, coadd_weights):
	command = __config__['balrog'].copy()
	command['nonosim']      = True
	command['noweightread'] = True
	command['nodraw']       = True

	ngal = len(pyfits.open( '%s.fits' % __tilename__)[1].data)
	command['ngal'] = ngal
	command['tile'] = __tilename__
	command['poscat'] = '%s.fits' % __tilename__
	command['seed'] = __config__['seed_balrog']+HashTile(__tilename__)

	for band_ in range(1, len(bands)):
		band = bands[band_]
		img  = image_files[band_]
		psf  = psf_files[band_]

		command['detpsf']    = psf_files[0]
		command['detimage']  = coadd_image
		command['detweight'] = coadd_weights
		command['psf'] = psf_files[band_]
		command['image'] = image_files[band_]
		command['outdir'] = './'+band+'/'
		command['zeropoint'] = GetZeroPoint(image_files[band_],band)
		command['band'] = band

		RunBalrog(command)

	nosim_files = []
	nosim_cols  = []
	for band_ in range(1, len(bands)):
		nosim_files.append( pyfits.open('./%s/balrog_cat/DES2051-5248_%s.measuredcat.sim.fits'%(bands[band_],bands[band_]) )[2].data )

		for col_ in nosim_files[-1].columns:
			if col_.name != 'VECTOR_ASSOC':
				nosim_cols.append( pyfits.Column(name=(col_.name+'_'+bands[band_]).lower(), format=col_.format, array=nosim_files[-1][col_.name] ) )
			else:
				truth_files = pyfits.open('./%s/balrog_cat/DES2051-5248_%s.truthcat.sim.fits'%(bands[band_],bands[band_]) )[1].data
				_col_ = truth_files.columns.names.index('balrog_index')
				nosim_cols.append( pyfits.Column(name=('balrog_index_'+bands[band_]).lower(), format='D', array=nosim_files[-1]['VECTOR_ASSOC'][:,_col_] ) )


	hdu = pyfits.BinTableHDU.from_columns( nosim_cols )
	hdu.writeto( '%s_sim.fits'%__tilename__,clobber=True )