def write_zbest(results, name='', path='', extrahdu=True):
    col1 = fits.Column(name='BRICKNAME', format='8A', array=results['brickname'])
    col2 = fits.Column(name='TARGETID', format='K', array=results['i'])
    col3 = fits.Column(name='Z', format='D', array=results['z'])
    col4 = fits.Column(name='ZERR', format='D',
                       array=0.5 * (results['z68_hi'] - results['z68_lo']))
    col5 = fits.Column(name='ZWARN', format='K', array=results['zwarn'])
    col6 = fits.Column(name='TYPE', format='20A', array=results['type'])
    col7 = fits.Column(name='SUBTYPE', format='20A', array=results['subtype'])
    col8 = fits.Column(name='ZMAP', format='D', array=results['z_best'])
    col9 = fits.Column(name='Z50', format='D', array=results['z50'])
    col10 = fits.Column(name='Z95HI', format='D', array=results['z95_hi'])
    col11 = fits.Column(name='Z95LO', format='D', array=results['z95_lo'])
    col12 = fits.Column(name='Z68HI', format='D', array=results['z68_hi'])
    col13 = fits.Column(name='Z68LO', format='D', array=results['z68_lo'])
    cols = fits.ColDefs([col1, col2, col3, col4, col5, col6, col7])
    cols2 = fits.ColDefs([col8, col9, col10, col11, col12, col13])
    tbhdu = fits.new_table(cols)
    tbhdu.name = 'ZBEST'
    tbhdu2 = fits.new_table(cols2)
    tbhdu2.name = 'BAYEZ'
    hdu = fits.PrimaryHDU(1)
    prihdr = fits.Header()
    prihdr['COMMENT'] = "Bayez redshift estimation. As for now error is computed as (zhi68-zlow68)/2"
    prihdu = fits.PrimaryHDU(header=prihdr)
    if extrahdu:
        # extrahdu writes an extra HDU to include Bayez-specific output
        thdulist = fits.HDUList([prihdu, tbhdu, tbhdu2])
    else:
        thdulist = fits.HDUList([prihdu, tbhdu])
    thdulist.writeto(path + name)
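# Note: fits.new_table() is the old PyFITS / pre-1.0 astropy API used throughout these
# snippets; in current astropy it was removed in favor of fits.BinTableHDU.from_columns(),
# which accepts the same Column/ColDefs input (several snippets below already use that
# fallback). A minimal compatibility sketch; the helper name is ours, not from any snippet:
def make_bintable(cols):
    """Build a binary table HDU from fits.Column objects on old or new astropy."""
    from astropy.io import fits
    try:
        return fits.BinTableHDU.from_columns(cols)  # newer astropy / pyfits
    except AttributeError:
        return fits.new_table(cols)                 # old pyfits / early astropy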
def write(self, outname, clobber=False):
    hdu = pyfits.HDUList(pyfits.PrimaryHDU())
    hdu[0].header['INSTRUME'] = 'COS'
    hdu[0].header['DETECTOR'] = 'FUV'
    hdu.append(pyfits.new_table(self.events_a))
    hdu[1].header['SEGMENT'] = 'FUVA'
    hdu.append(pyfits.new_table(self.events_b))
    hdu[2].header['SEGMENT'] = 'FUVB'
    hdu.writeto(outname, clobber=clobber)
def writeDefectsFile(bboxList, path, detectorSerial, detectorName):
    """Write a defects FITS file.

    Parameters
    ----------
    bboxList : `list` of `lsst.geom.Box2I`
        List of bounding boxes defining defect locations.
    path : `str`
        Path of output defects file; should end with ".fits".
    detectorSerial : `str`
        Serial code of detector.
    detectorName : `str`
        Name of detector.
    """
    head = fits.Header()
    head.update('SERIAL', detectorSerial, 'Serial of the detector')
    head.update('NAME', detectorName, 'Name of detector for this defect map')
    head.update('CDATE', time.asctime(time.gmtime()), 'UTC of creation')

    x0 = numpy.array([d.getMinX() for d in bboxList])
    y0 = numpy.array([d.getMinY() for d in bboxList])
    width = numpy.array([d.getWidth() for d in bboxList])
    height = numpy.array([d.getHeight() for d in bboxList])

    col1 = fits.Column(name='x0', format='I', array=numpy.array(x0))
    col2 = fits.Column(name='y0', format='I', array=numpy.array(y0))
    col3 = fits.Column(name='height', format='I', array=numpy.array(height))
    col4 = fits.Column(name='width', format='I', array=numpy.array(width))
    cols = fits.ColDefs([col1, col2, col3, col4])
    tbhdu = fits.new_table(cols, header=head)
    hdu = fits.PrimaryHDU()
    thdulist = fits.HDUList([hdu, tbhdu])
    thdulist.writeto(path)  # was "DefectsPath", an undefined name; write to the given path
def write_fits(self):
    """
    Save the ascii catalog data into a FITS bintable.

    The modification date of the ascii catalog is saved in the 'MODTIME'
    keyword of the FITS file
    """
    import time

    formats = {}
    formats['bool'] = 'L'
    formats['int16'] = 'I'
    formats['int32'] = 'J'
    formats['int64'] = 'K'
    formats['float32'] = 'E'
    formats['float64'] = 'D'
    formats['>i8'] = 'K'
    formats['>f8'] = 'D'

    #### Make the table columns, translating numpy data types to "TFORM"
    coldefs = []
    for column in self.columns:
        dtype = str(self.__getitem__(column).dtype)
        #print column, dtype
        if dtype in list(formats.keys()):
            TFORM = formats[dtype]
        else:
            if 'S' not in dtype:
                threedhst.showMessage('Unrecognized data type in %s: %s' % (self.filename, dtype), warn=True)
                return False
            #
            TFORM = 'A' + dtype.split('S')[1]
        #
        data = self.__getitem__(column)
        if '>' in dtype:
            cast_types = {'>i8': np.int64, '>f8': np.float64}
            data = np.cast[cast_types[dtype]](data)
        #
        coldefs.append(pyfits.Column(name=column, array=data, format=TFORM))

    #### Done, now make the binary table
    tbhdu = pyfits.new_table(coldefs)

    #### Primary HDU
    hdu = pyfits.PrimaryHDU()
    thdulist = pyfits.HDUList([hdu, tbhdu])

    #### Add modification time of "infile" to FITS header
    infile_mod_time = time.strftime("%m/%d/%Y %I:%M:%S %p",
                                    time.localtime(os.path.getmtime(self.filename)))
    thdulist[1].header.update('MODTIME', infile_mod_time)
    thdulist.writeto(self.filename + '.FITS', clobber=True)
    return True
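# Note: thdulist[1].header.update('MODTIME', ...) above uses the old
# Header.update(keyword, value[, comment]) calling convention. In current astropy,
# Header.update() behaves like dict.update(); per-keyword writes use item assignment
# or Header.set(). A minimal sketch (helper name and arguments are illustrative):
def set_keyword(header, key, value, comment=None):
    """Set one keyword on a modern astropy Header, with an optional comment."""
    if comment is None:
        header[key] = value
    else:
        header[key] = (value, comment)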
def __merge_gti__(handles, no_table=False, interval=None):
    """Return a FITS table of merged GTI."""
    if len(handles) == 0:
        return ([], [])

    # Merge the gti and sort the results
    starts = N.concatenate([x['GTI'].data.field('START') for x in handles])
    stops = N.concatenate([x['GTI'].data.field('STOP') for x in handles])
    sorting = N.argsort(starts)
    starts = starts[sorting]
    stops = stops[sorting]

    if interval is not None:
        mask = N.logical_and(starts > interval[0], stops < interval[1])
        starts = N.append(interval[0], starts[mask])
        stops = N.append(stops[mask], interval[1])

    if no_table:
        return (starts, stops)

    # Put GTI in new table
    gti_table = pf.new_table(handles[0]['GTI'].columns, nrows=len(starts))
    gti_table.data.field('START')[:] = starts
    gti_table.data.field('STOP')[:] = stops
    return gti_table
def save(self, filename: str):
    """Save spectrum to FITS file

    Args:
        filename: If given, save to this filename.
    """
    # filename
    filename = filename or self._filename
    if not filename:
        raise ValueError('No filename given for saving the FITS file.')

    # create binary table
    c1 = fits.Column(name='WAVE', format='1D', unit='Angstrom', array=self._wavelength)
    c2 = fits.Column(name='FLUX', format='1E', unit='erg/s/cm2/Angstrom', array=self.flux)
    hdu = fits.new_table([c1, c2])
    hdu.header["EXTNAME"] = "SPECTRUM"

    # create HDU list and save it
    hdu_list = fits.HDUList([hdu])
    hdu_list.writeto(filename)

    # store filename
    self._filename = filename
def phase_cut(eventfile, outputfile=None, phaseranges=[[0, 1]], phase_col_name='PULSE_PHASE'):
    """Select phases within a set of intervals.

    outputfile  - set to change the default output (eventfile_PHASECUT.fits)
    phaseranges - a set of ranges on which to make inclusive cuts

    NB -- there was a problem with using the mask as below.  Seems to be resolved,
    but check for consistency in output FITS file to be safe."""
    from numpy import array
    ef = PF.open(expandvars(eventfile))
    ph = array(ef['EVENTS'].data.field(phase_col_name)).astype(float)
    mask = array([False] * len(ph))
    for r in phaseranges:
        for i, myph in enumerate(ph):
            if (r[0] <= myph) and (myph <= r[1]):
                mask[i] = True
    duty_cycle = sum((x[1] - x[0] for x in phaseranges))
    print 'Selecting %d / %d photons (duty cycle = %.2f)' % (mask.sum(), len(mask), duty_cycle)
    hdu = PF.new_table(ef['EVENTS'].columns, nrows=mask.sum())
    for i in xrange(len(ef['EVENTS'].columns)):
        hdu.data.field(i)[:] = ef['EVENTS'].data.field(i)[mask]
    ef['EVENTS'].data = hdu.data
    if outputfile:
        ef.writeto(outputfile, clobber=True)
    else:
        ef.writeto(eventfile.replace('.fits', '_PHASECUT.fits'), clobber=True)
    ef.close()
def writeSpotFITS(pathSpec, outDir, data, headerDict):
    # raise NotImplementedError("writeSpotFITS() no longer needed or tested")

    phdu = fits.PrimaryHDU()
    phdr = phdu.header
    phdr.update('pixscale', 0.003, 'mm/pixel')

    cols = []
    cols.append(fits.Column(name='fiberIdx', format='I', array=data['fiberIdx']))
    cols.append(fits.Column(name='wavelength', format='D', array=data['wavelength']))
    cols.append(fits.Column(name='spot_xc', format='D', array=data['spot_xc']))
    cols.append(fits.Column(name='spot_yc', format='D', array=data['spot_yc']))
    spots = data['spot'][:]
    spotw = spots[0].shape[0]
    spots.shape = (len(spots), spotw * spotw)
    cols.append(fits.Column(name='spot', format='%dE' % (spotw * spotw),
                            dim='(%d,%d)' % (spotw, spotw), array=spots))
    colDefs = fits.ColDefs(cols)

    thdu = fits.new_table(colDefs)
    hdulist = fits.HDUList([phdu, thdu])

    filename = "%(date)s_%(band)s_%(frd)02d_%(focus)+02d.fits" % pathSpec
    hdulist.writeto(os.path.join(outDir, filename), checksum=True, clobber=True)
def select_columns(tbhdu, *fieldnames):
    """
    Select particular columns from given table

    A new table with only the asked columns ('fieldnames') is output.

    Input:
     - tbhdu : pyfits.open('data.fit')[?]
        Table HDU, often "?" equals 1
     - cols : str,
        Comma separated list of variables to be read from 'hdulist'

    Output:
     -> (new) BinTableHDU, with just the selected fields

    ---
    """
    coldefs = tbhdu.columns
    tbdata = tbhdu.data
    inds = [tbdata.names.index(id.upper()) for id in fieldnames]
    cols = []
    for i in inds:
        cols.append(pyfits.Column(name=coldefs[i].name, format=coldefs[i].format,
                                  array=tbdata.field(i)))
    coldefs = pyfits.ColDefs(cols)
    return pyfits.new_table(coldefs)
def appendFitsTable(file1, file2, outname, extension=1):
    """
    Append one fits table to another; the output table inherits the column
    attributes of the first fits table.
    Input:  file1   --- fits table
            file2   --- fits table (will be appended to file1)
            outname --- the name of the new fits file
    Output: a new fits file "outname"
    """
    t1 = fits.open(file1)
    t2 = fits.open(file2)
    #
    #--- find numbers of rows (two different ways as examples here)
    #
    nrow1 = t1[extension].data.shape[0]
    nrow2 = t2[extension].header['naxis2']
    #
    #--- total numbers of rows to be created
    #
    nrows = nrow1 + nrow2
    hdu = fits.new_table(t1[extension].columns, nrows=nrows)
    #
    #--- append by the field names
    #
    for name in t1[extension].columns.names:
        hdu.data.field(name)[nrow1:] = t2[extension].data.field(name)
    #
    #--- write new fits data file
    #
    hdu.writeto(outname)
    t1.close()
    t2.close()
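# Note: the nrows= pattern above (grow the first table, then fill the tail from the
# second) is the standard row-append recipe; with current astropy the same thing is
# spelled with BinTableHDU.from_columns. A minimal sketch, assuming two open HDULists
# t1 and t2 with matching table columns in the given extension:
def append_rows(t1, t2, extension=1):
    from astropy.io import fits
    nrow1 = t1[extension].data.shape[0]
    nrow2 = t2[extension].data.shape[0]
    hdu = fits.BinTableHDU.from_columns(t1[extension].columns, nrows=nrow1 + nrow2)
    for name in t1[extension].columns.names:
        hdu.data[name][nrow1:] = t2[extension].data[name]
    return hdu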
def write_ring_fits(fitsname, res, taun_rings, radii, dstar):
    # make Column objects with our output data
    col1 = fits.Column(name='taun', format='E', array=taun_rings)
    col2 = fits.Column(name='radius', format='E', array=radii)

    # create a ColDefs object for all the columns
    cols = fits.ColDefs([col1, col2])

    # create the binary table HDU object - a BinTableHDU
    tbhdu = fits.new_table(cols)

    prihdr = fits.Header()
    prihdr['TIMPACT'] = (res[0], 'Impact parameter (days)')
    prihdr['TMINR'] = (res[1], 'Time of minimum disk radius (days)')
    prihdr['DINCL'] = (res[2], 'Disk inclination (degrees)')
    prihdr['DTILT'] = (res[3], 'Disk tilt to orbital motion (degrees)')
    prihdr['DSTAR'] = (dstar, 'Diameter of star (days)')
    prihdr['HN'] = (0.907, 'Henweigh parameter')

    # open a PrimaryHDU object with no data (since you can't have TableHDU
    # in a PrimaryHDU) and append the TableHDU with the header
    prihdu = fits.PrimaryHDU(header=prihdr)
    thdulist = fits.HDUList([prihdu, tbhdu])
    thdulist.writeto(fitsname, clobber=True)
    print 'write_ring_fits: wrote FITS file to %s' % fitsname
def gen_write_fits(file_name, col_names, columns):
    """Write some columns to an output FITS file with the given column names.

    :param file_name:   The name of the file to write to.
    :param col_names:   A list of columns names for the given columns.
    :param columns:     A list of numpy arrays with the data to write.
    """
    try:
        import fitsio
        data = numpy.empty(len(columns[0]), dtype=[(name, 'f8') for name in col_names])
        for (name, col) in zip(col_names, columns):
            data[name] = col
        fitsio.write(file_name, data, clobber=True)
    except ImportError:
        try:
            import astropy.io.fits as pyfits
        except:
            import pyfits
        cols = pyfits.ColDefs([
            pyfits.Column(name=name, format='D', array=col)
            for (name, col) in zip(col_names, columns)])
        # Depending on the version of pyfits, one of these should work:
        try:
            tbhdu = pyfits.BinTableHDU.from_columns(cols)
        except:
            tbhdu = pyfits.new_table(cols)
        tbhdu.writeto(file_name, clobber=True)
def wavelengthCalFitsTable(outpath, base_name, order, col, source, wave_exp,
                           wave_fit, res, peak, slope):
    prihdr = fits.Header()
    prihdr['COMMENT'] = "wavelength calibration table"
    prihdu = fits.PrimaryHDU(header=prihdr)
    tbhdu = fits.new_table(fits.ColDefs([
        fits.Column(name='order', format='1I', array=order),
        fits.Column(name='source', format='1A', array=source),
        fits.Column(name='col (pixels)', format='1D', array=col),
        fits.Column(name='wave_exp (Angstroms)', format='1D', array=wave_exp),
        fits.Column(name='wave_fit (Angstroms)', format='1D', array=wave_fit),
        fits.Column(name='res (Angstroms)', format='1D', array=res),
        fits.Column(name='peak (counts)', format='1D', array=peak),
        fits.Column(name='disp (Angstroms/pixel)', format='1D', array=slope)]))
    thdulist = fits.HDUList([prihdu, tbhdu])
    fn = constructFileName(outpath, base_name, None, 'calids.tbl')
    thdulist.writeto(fn, clobber=True)
    log_fn(fn)
    return
def join_cats(cs, outputfile):
    tables = {}
    i = 0
    cols = []
    seqnr = 0
    for c in cs:
        if len(c) == 2:
            TAB = c[1]
            c = c[0]
        else:
            TAB = 'STDTAB'
        i += 1
        print c
        tables[str(i)] = pyfits.open(c)
        for column in tables[str(i)][TAB].columns:
            if column.name == 'SeqNr':
                if not seqnr:
                    seqnr += 1
                else:
                    column.name = column.name + '_' + str(seqnr)
                    seqnr += 1
            cols.append(column)
    #print cols
    print len(cols)
    hdu = pyfits.PrimaryHDU()
    hduSTDTAB = pyfits.new_table(cols)
    hdulist = pyfits.HDUList([hdu])
    hdulist.append(hduSTDTAB)
    hdulist[1].header.update('EXTNAME', 'STDTAB')
    import os
    os.system('rm ' + outputfile)
    print outputfile
    hdulist.writeto(outputfile)
def write_ring_fits(fitsname, res, taun_rings, radii, dstar):
    # make Column objects with our output data
    col1 = fits.Column(name='taun', format='E', array=taun_rings)
    col2 = fits.Column(name='radius', format='E', array=radii)

    # create a ColDefs object for all the columns
    cols = fits.ColDefs([col1, col2])

    # create the binary table HDU object - a BinTableHDU
    tbhdu = fits.new_table(cols)

    prihdr = fits.Header()
    prihdr['TIMPACT'] = (res[0], 'Impact parameter (days)')
    prihdr['TMINR'] = (res[1], 'Time of minimum disk radius (days)')
    prihdr['DINCL'] = (res[2], 'Disk inclination (degrees)')
    prihdr['DTILT'] = (res[3], 'Disk tilt to orbital motion (degrees)')
    prihdr['DSTAR'] = (dstar, 'Diameter of star (days)')
    prihdr['HN'] = (0.907, 'Henweigh parameter')

    # open a PrimaryHDU object with no data (since you can't have TableHDU
    # in a PrimaryHDU) and append the TableHDU with the header
    prihdu = fits.PrimaryHDU(header=prihdr)
    thdulist = fits.HDUList([prihdu, tbhdu])
    thdulist.writeto(fitsname, clobber=True)
    print('write_ring_fits: wrote FITS file to %s' % fitsname)
def strip_gtis():
    path_GTI = hard_coded_dir + '/' + 'GTI_FITS.fits'
    hdulist = fits.open(path_GTI)
    gti_header = hdulist[0]
    # get individual gti's
    gtis_to_use = hdulist[1].data
    counter = 0
    for x in gtis_to_use['start']:
        counter += 1
        # create data column of new fits file
        start_array = np.array([x])
        stop_list = gtis_to_use['stop']
        stop_array = np.array([stop_list[counter - 1]])
        col1 = fits.Column(name='start', format='E', array=start_array)
        col2 = fits.Column(name='stop', format='E', array=stop_array)
        cols = fits.ColDefs([col1, col2])
        tbhdu = fits.new_table(cols)
        thdulist = fits.HDUList([gti_header, tbhdu])
        print hard_coded_dir + '/' + 'sub_GTI_FITS_' + str(counter) + '.fit'
        thdulist.writeto(hard_coded_dir + '/' + 'sub_GTI_FITS_' + str(counter) + '.fit')
        produce_xron_gti(hard_coded_dir + '/' + 'sub_GTI_FITS_' + str(counter) + '.fit',
                         hard_coded_dir + '/' + 'sub_GTI_FITS_' + str(counter) + '_xronwin' + '.fit')
    hdulist.close()
def sort_by_column(tbhdu, fieldname):
    """
    Sort a FITS table HDU by its "fieldname" column in increasing order.

    Inputs:
     - tbhdu: FITS table HDU
     - fieldname <str> : field name of the column to sort

    Output:
     - new tbhdu with data sorted according to 'fieldname' column
    """
    from operator import itemgetter, attrgetter

    coldefs = tbhdu.columns
    tbdata = tbhdu.data
    index = tbdata.names.index(fieldname)

    sorted_data = np.transpose(sorted(tbdata, key=itemgetter(index)))

    cols = []
    for i in xrange(len(coldefs.names)):
        cols.append(pyfits.Column(name=coldefs[i].name, format=coldefs[i].format,
                                  array=sorted_data[i]))
    coldefs = pyfits.ColDefs(cols)

    return pyfits.new_table(coldefs)
def fixMatrixHDU(matrixHDU):
    # This creates a copy of the input matrix with all variable-length arrays
    # converted to fixed length, of the smallest possible size.
    # This is needed because pyfits makes all sort of fuckups with variable-length arrays
    newcols = []
    for col in matrixHDU.columns:
        if (col.format.find("P") == 0):
            # Variable-length
            newMatrix = variableToMatrix(matrixHDU.data.field(col.name))
            length = len(newMatrix[0])
            coltype = col.format.split("(")[0].replace("P", "")
            newFormat = '%s%s' % (length, coltype)
            newcols.append(pyfits.Column(col.name, newFormat, col.unit, col.null,
                                         col.bscale, col.bzero, col.disp, col.start,
                                         col.dim, newMatrix))
        else:
            newcols.append(pyfits.Column(col.name, col.format, col.unit, col.null,
                                         col.bscale, col.bzero, col.disp, col.start,
                                         col.dim, matrixHDU.data.field(col.name)))
        pass
    newtable = pyfits.new_table(newcols, header=matrixHDU.header)
    return newtable
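# Note: the check col.format.find("P") == 0 above relies on the FITS binary-table
# convention that a TFORM beginning with 'P' marks a variable-length array column
# (the 64-bit descriptor form uses 'Q'). A small predicate making that intent explicit;
# this helper is ours, not part of the snippet:
def is_variable_length(col):
    """Return True if a fits.Column uses a variable-length array descriptor."""
    return col.format.startswith(('P', 'Q'))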
def extend_tbHDU(tbhdu_A, tbhdu_B):
    """
    Extend the first tbHDU with the second one's entries

    The output is a table (HDU) with column 'B' lines extending 'A' entries.
    Column names on both input tables need to be the same.

    Input:
     - tbhdu_A : FITS binary table HDU
     - tbhdu_B : FITS binary table HDU

    Output:
     - tbhdu : FITS binary table HDU
        Result from extension, A+B

    ---
    """
    Nrows_A = tbhdu_A.header['NAXIS2']
    Nrows_B = tbhdu_B.header['NAXIS2']
    Nrows = Nrows_A + Nrows_B

    new_tb = pyfits.new_table(tbhdu_A.columns, nrows=Nrows)

    for name in tbhdu_A.columns.names:
        new_tb.data.field(name)[Nrows_A:] = tbhdu_B.data.field(name)

    return new_tb
def main():
    ### make sure to change these when running in a new environment! ###
    # location of data directory
    filepath = '/scratch/dac29/data/CFHTLS-T0007/photo_z_pdz/'
    # save data to directory...
    savepath = '/scratch/dac29/output/processed_data/terapix/photo_z_catalogues/'

    filename = 'pdz_W1_270912_part1.fits'
    hdulist1 = fits.open(filepath + filename, memmap=True)
    tbdata1 = hdulist1[1].data
    filename = 'pdz_W1_270912_part2.fits'
    hdulist2 = fits.open(filepath + filename, memmap=True)
    tbdata2 = hdulist2[1].data

    nrows1 = tbdata1.shape[0]
    nrows2 = tbdata2.shape[0]
    nrows = nrows1 + nrows2
    print nrows

    # create a new table and merge the two tables
    hdu = fits.new_table(hdulist1[1].columns, nrows=nrows)
    for name in hdulist1[1].columns.names:
        hdu.data.field(name)[nrows1:] = tbdata2.field(name)

    # save new table to disk
    hdu.writeto(savepath + 'pdz_W1_270912.fits', clobber=True)
    print 'done! remember to copy W2, W3, and W4 to the output directory!'
def write_cl(filename, cl, dtype=np.float64):
    """Writes Cl into an healpix file, as IDL cl2fits.

    Parameters
    ----------
    filename : str
        the fits file name
    cl : array
        the cl array to write to file, currently TT only
    """
    # check the dtype and convert it
    fitsformat = getformat(dtype)

    column_names = ['TEMPERATURE', 'GRADIENT', 'CURL', 'G-T', 'C-T', 'C-G']
    if isinstance(cl, list):
        cols = [pf.Column(name=column_name, format='%s' % fitsformat, array=column_cl)
                for column_name, column_cl in zip(column_names[:len(cl)], cl)]
    else:
        # we write only one TT
        cols = [pf.Column(name='TEMPERATURE', format='%s' % fitsformat, array=cl)]

    tbhdu = pf.new_table(cols)
    # add needed keywords
    tbhdu.header.update('CREATOR', 'healpy')
    writeto(tbhdu, filename)
def write_fitsbinary(fitter, outparlist=None):
    """Write fit results to a FITS binary table.  Too highly specialized"""
    if outparlist is None:
        outparlist = fitter.rp['outparnames']

    # best-fit chi^2
    cb = fitter.basel.structure_array(fitter.max_lnprob * (-2), ['chibest'])

    # parameter percentiles
    pst = []
    for i, par in enumerate(outparlist):
        print(par, fitter.parval[par].shape)
        print(['{0}_p{1:5.3f}'.format(par.replace('galex_', ''), pt)
               for pt in fitter.rp['percentiles']])
        pst += [fitter.basel.structure_array(
            fitter.parval[par][:, 0:3],
            ['{0}_p{1:03.0f}'.format(par.replace('galex_', ''), pt * 1000)
             for pt in fitter.rp['percentiles']])]
        pst += [fitter.basel.structure_array(
            fitter.parval[par][:, -1],
            ['{0}_best'.format(par.replace('galex_', ''))])]

    # best-fit residuals
    dd = []
    for i, f in enumerate(fitter.rp['fit_fnamelist']):
        dd += [fitter.basel.structure_array(
            fitter.delta_best[f],
            ['{0}_delta_best'.format(f.replace('galex_', ''))])]

    # put everything together (including ra, dec, and other header info) and write it out
    cat = fitter.basel.join_struct_arrays(
        [fitter.data_header, fitter.data_mag, fitter.data_magerr, cb] + pst + dd)
    cols = pyfits.ColDefs(cat)
    tbhdu = pyfits.new_table(cols)
    tbhdu.writeto('{0}_starprops.fits'.format(fitter.rp['outname']), clobber=True)
def to_fits(self, filename, format=None, clobber=False, history=''):
    if format is None:
        format = healpix.default_fits_format_codes[self.get_dtype().type]
    hdu0 = pyfits.PrimaryHDU()
    col0 = pyfits.Column(name='signal', format=format, array=self.map.map)
    col1 = pyfits.Column(name='weights', format=format, array=self.wgt.map)
    col_inds = [pyfits.Column(name='sp_index%d' % n, format=format, array=i.map)
                for n, i in enumerate(self.ind)]
    cols = pyfits.ColDefs([col0, col1] + col_inds)
    tbhdu = pyfits.new_table(cols)
    self.map._set_fits_header(tbhdu.header)
    hdulist = pyfits.HDUList([hdu0, tbhdu])
    if history != '':
        history = [h.strip() for h in history.split("\n")]
        for line in history:
            if len(line) > 1:
                if line.startswith('#'):
                    for subline in img.word_wrap(line, 80, 0, 0, '').split("\n"):
                        hdulist[0].header.add_history(subline)
                else:
                    for subline in img.word_wrap(line, 70, 5, 10, '#').split("\n"):
                        hdulist[0].header.add_history(subline)
    hdulist.writeto(filename, clobber=clobber)
def EditFitsFile(column_dict, filename, extension, header_info=[]):
    print "Editing extension number %i" % extension

    columns = []
    for key in column_dict.keys():
        columns.append(pyfits.Column(name=key, format="D", array=column_dict[key]))
    cols = pyfits.ColDefs(columns)
    tablehdu = pyfits.new_table(cols)

    # Add keywords to extension header
    num_keywords = len(header_info)
    header = tablehdu.header
    for i in range(num_keywords):
        info = header_info[i]
        if len(info) > 2:
            header.update(info[0], info[1], info[2])
        elif len(info) == 2:
            header.update(info[0], info[1])

    # Open file and update the appropriate extension
    hdulist = pyfits.open(filename, mode='update', save_backup=True)
    if extension < len(hdulist):
        hdulist[extension] = tablehdu
    else:
        hdulist.append(tablehdu)
    hdulist.flush()
    hdulist.close()

    return
def mwrfits(filename, data, hdu=1, colnames=None, keys=None):
    """Write columns to a fits file in a table extension.

    Parameters
    ----------
    filename : str
        The fits file name
    data : list of 1D arrays
        A list of 1D arrays to write in the table
    hdu : int, optional
        The header where to write the data. Default: 1
    colnames : list of str
        The column names
    keys : dict-like
        A dictionary with keywords to write in the header
    """
    # Check the inputs
    if colnames is not None:
        if len(colnames) != len(data):
            raise ValueError("colnames and data must have the same length")
    else:
        colnames = [''] * len(data)
    cols = []
    for line in six.moves.xrange(len(data)):
        cols.append(pf.Column(name=colnames[line],
                              format=getformat(data[line]),
                              array=data[line]))
    tbhdu = pf.new_table(cols)
    if type(keys) is dict:
        for k, v in keys.items():
            tbhdu.header.update(k, v)
    # write the file
    writeto(tbhdu, filename)
def project(self, cube):
    arr_code = []
    arr_rest_freq = []
    dba = db.lineDB(self.dbpath)
    dba.connect()
    freq_init_corr = cube.freq_border[0] / (1 + self.z)
    freq_end_corr = cube.freq_border[1] / (1 + self.z)
    count = 0
    used = False
    for mol in self.intens:
        # For each molecule specified in the dictionary
        # load its spectral lines
        linlist = dba.getSpeciesLines(mol, freq_init_corr, freq_end_corr)
        # Selected spectral lines for this molecule
        for lin in linlist:
            count += 1
            freq = (1 + self.z) * lin[3]  # Catalogs must be in Mhz
            window = freq_window(freq, cube.freq_axis)
            cube.data[window] = 1
            used = True
            arr_code.append(mol + "-f" + str(lin[3]))
            arr_rest_freq.append(window)
    dba.disconnect()
    if not used:
        return
    tbhdu = fits.new_table(fits.ColDefs([
        fits.Column(name='line_code', format='40A', array=arr_code),
        fits.Column(name='line', format='D', array=arr_rest_freq)]))
    cube._add_HDU(tbhdu)
def __call__(self, outfile):
    self.cols = []
    z = self.z[self.z.ts > self.TScut]  # limit for now
    # assume sorted already
    #z.sort(order=('ra'))
    #z.ts = z.ts2  # kluge for now
    self.check = False
    self.bad = z.ts < 9
    self.add('NickName', z.name)
    self.add('RAJ2000', z.ra)
    self.add('DEJ2000', z.dec)
    sdir = map(SkyDir, z.ra, z.dec)
    self.add('GLON', [s.l() for s in sdir])
    self.add('GLAT', [s.b() for s in sdir])

    # localization
    f95 = 2.45 * 1.1  # from
    self.add('LocalizationQuality', z.locqual)
    self.add('Conf_95_SemiMajor', f95 * z.a)
    self.add('Conf_95_SemiMinor', f95 * z.b)
    self.add('Conf_95_PosAng', z.ang)
    self.add('Test_Statistic', z.ts)

    # Spectral details
    self.add('SpectrumType', z.modelname)
    self.add('Pivot_Energy', z.e0)  # note that pivot_energy is the measured value
    self.add('Flux_Density', z.flux)
    self.add('Unc_Flux_Density', z.flux_unc)
    self.add('Spectral_Index', z.pindex)
    self.add('Unc_Spectral_Index', z.pindex_unc)
    self.add('Index2', z.index2)
    self.add('Unc_Index2', z.index2_unc)
    self.add('Cutoff_Energy', z.cutoff)
    self.add('Cutoff_Energy_Unc', z.cutoff_unc)
    self.add('SpectralFitQuality', z.fitqual)
    self.add('Extended', pd.isnull(z.locqual))

    #if self.add_assoc:
    #    assoc = Assoc()
    #    for idcol in 'Number Name Probability RA DEC Angsep Catalog'.split():
    #        h = 'ID_' + idcol
    #        adata = np.array([assoc(name).field(h)[0] for name in z.name])
    #        self.add(h, adata)

    # make the FITS stuff
    table = pyfits.new_table(self.cols)
    table.name = '4year_LAT_Source_Catalog'
    if os.path.exists(outfile):
        os.remove(outfile)
    self.hdus = [pyfits.PrimaryHDU(header=None),  # primary
                 table,  # this table
                 ]
    if self.add_assoc:
        self.hdus += [assoc.cat_ref, ]  # the catalog reference (copied)
    self.finish(outfile)
def writeifits(outfile, xpix, ypix, iamp, iparam, it0):
    """
    Save injection parameters for each pixel.

    Parameters:
      outfile -- output file name
      xpix    -- image pixel x index
      ypix    -- image pixel y index
      iamp    -- injection amplitude
      iparam  -- injection parameters (e.g. duration)
      it0     -- injection start time

    Outputs:
      (none, but outfile is saved to disk)
    """
    # create fits table
    cols = [fits.Column(name="xpix", array=xpix.ravel(), format="I"),
            fits.Column(name="ypix", array=ypix.ravel(), format="I"),
            fits.Column(name="amp", array=iamp.ravel(), format="E"),
            fits.Column(name="param", array=iparam.ravel(), format="E"),
            fits.Column(name="mjd", array=it0.ravel(), format="D")]
    hdu = fits.new_table(cols)
    hdu.writeto(outfile, clobber=True)

    return
def mapcube_to_healpix(inputfile, suffix='_nside256_bpd4',
                       inpath='$FERMI/diffuse', outpath=None,
                       energy_bins=np.logspace(1.75, 6.5, 20), emin=58.5):
    """ convert a MapCube to column format, consistent with diffuse group
    """
    fullinputfile = os.path.expandvars(os.path.join(inpath, inputfile))
    assert os.path.exists(fullinputfile), 'File not found: %s' % fullinputfile
    d = skymaps.DiffuseFunction(fullinputfile)
    galbands = []
    energies = np.sqrt(energy_bins[:-1] * energy_bins[1:])
    for elow, ehigh in zip(energy_bins[:-1], energy_bins[1:]):
        print elow, ehigh
        d.setEnergyRange(max(elow, emin), ehigh)
        t = HPskyfun('e_%d' % np.sqrt(elow * ehigh), d, 256)
        t.setcol()
        t.vec /= (ehigh - elow)
        galbands.append(t)
    tt = HEALPixFITS(galbands)
    flux_table = tt.make_table()
    energy_column = pyfits.Column('MeV', format='E', unit='MeV', array=energies)
    energy_table = pyfits.new_table([energy_column])
    energy_table.name = 'ENERGIES'
    hdus = [pyfits.PrimaryHDU(header=None), flux_table, energy_table]
    fulloutfile = os.path.expandvars(
        os.path.join(outpath if outpath is not None else inpath,
                     inputfile.replace('.fits', suffix + '.fits')))
    if os.path.exists(fulloutfile):
        os.remove(fulloutfile)
    print 'writing output file %s' % fulloutfile
    pyfits.HDUList(hdus).writeto(fulloutfile)
def gen_write_fits(file_name, col_names, columns):
    """Write some columns to an output FITS file with the given column names.

    :param file_name:   The name of the file to write to.
    :param col_names:   A list of columns names for the given columns.
    :param columns:     A list of numpy arrays with the data to write.
    """
    import numpy
    try:
        import fitsio
        data = numpy.empty(len(columns[0]), dtype=[(name, 'f8') for name in col_names])
        for (name, col) in zip(col_names, columns):
            data[name] = col
        fitsio.write(file_name, data, clobber=True)
    except ImportError:
        try:
            import astropy.io.fits as pyfits
        except:
            import pyfits
        cols = pyfits.ColDefs([
            pyfits.Column(name=name, format='D', array=col)
            for (name, col) in zip(col_names, columns)])
        # Depending on the version of pyfits, one of these should work:
        try:
            tbhdu = pyfits.BinTableHDU.from_columns(cols)
        except:
            tbhdu = pyfits.new_table(cols)
        tbhdu.writeto(file_name, clobber=True)
def generate_grid(photbands, vrads=[0], ebvs=[0], zs=[0], law='claret',
                  fitmethod='equidist_r_leastsq', outfile='mygrid.fits', **kwargs):

    if os.path.isfile(outfile):
        hdulist = pf.open(outfile, mode='update')
        existing_bands = [ext.header['extname'] for ext in hdulist[1:]]
    else:
        hdulist = pf.HDUList([])
        hdulist.append(pf.PrimaryHDU(np.array([[0, 0]])))
        existing_bands = []

    hd = hdulist[0].header
    hd.update('FIT', fitmethod, 'FIT ROUTINE')
    hd.update('LAW', law, 'FITTED LD LAW')
    hd.update('GRID', kwargs.get('grid', defaults['grid']), 'GRID')

    for photband in photbands:
        if photband in existing_bands:
            logger.info('BAND {} already exists: skipping'.format(photband))
            continue
        pars, coeffs, Imu1s = fit_law_to_grid(photband, vrads=vrads, ebvs=ebvs, zs=zs, **kwargs)
        cols = []
        cols.append(pf.Column(name='Teff', format='E', array=pars[:, 0]))
        cols.append(pf.Column(name="logg", format='E', array=pars[:, 1]))
        cols.append(pf.Column(name="ebv", format='E', array=pars[:, 2]))
        cols.append(pf.Column(name="vrad", format='E', array=pars[:, 3]))
        cols.append(pf.Column(name="z", format='E', array=pars[:, 4]))
        for col in range(coeffs.shape[1]):
            cols.append(pf.Column(name='a{:d}'.format(col + 1), format='E',
                                  array=coeffs[:, col]))
        cols.append(pf.Column(name='Imu1', format='E', array=Imu1s[:, 0]))
        cols.append(pf.Column(name='SRS', format='E', array=Imu1s[:, 1]))
        cols.append(pf.Column(name='dint', format='E', array=Imu1s[:, 2]))

        newtable = pf.new_table(pf.ColDefs(cols))
        newtable.header.update('EXTNAME', photband, "SYSTEM.FILTER")
        newtable.header.update('SYSTEM', photband.split('.')[0], 'PASSBAND SYSTEM')
        newtable.header.update('FILTER', photband.split('.')[1], 'PASSBAND FILTER')
        hdulist.append(newtable)

    if os.path.isfile(outfile):
        hdulist.close()
    else:
        hdulist.writeto(outfile)
def save(self, sourceID, matchinfo):
    """Writes a new catalogue with primaryID to disk."""
    # Write the (sourceID, primaryID)s to a table
    col_sourceID = fits.Column(name='sourceID', format='15A', array=sourceID)
    col_nObs = fits.Column(name='nObs', format='B', array=matchinfo['nObs'])
    col_primaryID = fits.Column(name='primaryID', format='15A', array=matchinfo['primaryID'])
    col_partnerID = fits.Column(name='sourceID2', format='15A', null='', array=matchinfo['partnerID'])
    col_fieldID2 = fits.Column(name='fieldID2', format='15A', array=matchinfo['fieldID2'])
    col_r2 = fits.Column(name='r2', format='E', unit='Magnitude', array=matchinfo['r2'])
    col_rErr2 = fits.Column(name='rErr2', format='E', unit='Sigma', array=matchinfo['rErr2'])
    col_i2 = fits.Column(name='i2', format='E', unit='Magnitude', array=matchinfo['i2'])
    col_iErr2 = fits.Column(name='iErr2', format='E', unit='Sigma', array=matchinfo['iErr2'])
    col_ha2 = fits.Column(name='ha2', format='E', unit='Magnitude', array=matchinfo['ha2'])
    col_haErr2 = fits.Column(name='haErr2', format='E', unit='Sigma', array=matchinfo['haErr2'])
    col_errBits2 = fits.Column(name='errBits2', format='J', null=-2147483648,
                               unit='bitmask', array=matchinfo['errBits2'])
    cols = fits.ColDefs([col_sourceID, col_nObs, col_primaryID, col_partnerID,
                         col_fieldID2, col_r2, col_rErr2, col_i2, col_iErr2,
                         col_ha2, col_haErr2, col_errBits2])
    newtable = fits.new_table(cols)
    newtable.writeto(self.primaryid_file, clobber=True)

    # Then use stilts to add the extra column
    config = {'STILTS': constants.STILTS,
              'IN1': self.filename(self.fieldid),
              'IN2': self.primaryid_file,
              'OUT': self.output_file}
    cmd = "{STILTS} tmatch2 progress=none find=best1 in1={IN1} in2={IN2} "
    cmd += "matcher=exact join=all1 suffix1='' "
    cmd += "values1='sourceID' values2='sourceID' "
    cmd += "ocmd='delcols sourceID_2' out='{OUT}' "

    stilts_cmd = cmd.format(**config)
    log.debug(stilts_cmd)
    status = os.system(stilts_cmd)
    if status == 0:
        self.log_info('adding primaryID column: stilts returned ' + str(status))
    else:
        self.log_warning('adding primaryID column: stilts returned ' + str(status))
    return status
def write_fits(self, fname, header=None):
    """Writes the power spectrum/smooth function to the FITS file 'fname'
    """
    n_col = 4
    if self.dpE is not None:
        n_col += 3
    if self.band is True:
        n_col += 2

    if header is None:
        my_header = ['col_{}'.format(x) for x in range(n_col)]
    else:
        my_header = header.replace('#', '').split()

    scales = rad_to_unit(self.ell, self.unit_out)

    cols = []
    i = 0
    cols.append(fits.Column(name=my_header[i], format='E', array=scales))
    i += 1
    cols.append(fits.Column(name=my_header[i], format='E', array=self.pE))
    i += 1
    cols.append(fits.Column(name=my_header[i], format='E', array=self.pB))
    i += 1
    cols.append(fits.Column(name=my_header[i], format='E', array=self.pEB))

    if self.dpE is not None:
        i += 1
        cols.append(fits.Column(name=my_header[i], format='E', array=self.dpE))
        i += 1
        cols.append(fits.Column(name=my_header[i], format='E', array=self.dpB))
        i += 1
        cols.append(fits.Column(name=my_header[i], format='E', array=self.dpEB))

    if self.band is True:
        ell_l = []
        ell_u = []
        for j in range(self.Nell):
            my_ell_l, my_ell_u = self.ell_l_u(j)
            my_ell_l = rad_to_unit(my_ell_l, self.unit_out)
            my_ell_u = rad_to_unit(my_ell_u, self.unit_out)
            ell_l.append(my_ell_l)
            ell_u.append(my_ell_u)
        i += 1
        cols.append(fits.Column(name=my_header[i], format='E', array=ell_l))
        i += 1
        cols.append(fits.Column(name=my_header[i], format='E', array=ell_u))

    fcols = fits.ColDefs(cols)
    if sys.version_info >= (3, 3):
        hdu = fits.BinTableHDU.from_columns(fcols)
    else:
        hdu = fits.new_table(fcols)

    if self.unit_out is not 'none':
        hdu.header.append(card=('UNITS', self.unit_out, 'Coordinate units'))

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        hdu.writeto(fname, clobber=True)
def main(opts, pixfiles):
    flags = []
    for pixfile in pixfiles:
        print "Processing " + pixfile
        flags.extend(get_pixmask(pixfile, opts.infits))
    hdu = fits.new_table([fits.Column(name="flag", array=flags, format="L")])
    hdu.writeto(opts.outfits, clobber=True)
def tofits(self, filename, columns=("freq", "fluxcal")): """Save spectrum to FITS file""" cols = [] for i in columns: cols.append(pyfits.Column(name=i, format='E', array=self.__getattribute__(i))) tbhdu = pyfits.new_table(pyfits.ColDefs(cols)) tbhdu.writeto(filename, clobber=True)
def __spectrum_to_vofits(self, spectrum):
    t = pyfits.new_table(spectrum)
    t.header['TTYPE1'] = "WAVELENGTH"
    t.header['TTYPE2'] = "FLUX"
    t.header['TTYPE3'] = "SIGMA"
    fits = pyfits.HDUList(pyfits.PrimaryHDU())
    fits.append(t)
    return fits
def appendparams(table, namecol='names'):
    tablefile = pyf.open(table)
    table = tablefile[1].data
    names = table.field(namecol)
    temps = []
    mets = []
    loggs = []
    alphas = []
    carbs = []
    nitros = []
    for name in names:
        # parse the stellar parameters encoded in each name string
        temp = name[18:22]
        temp = float(temp)
        if name[27] == 'p':
            met = name[28:30]
        else:
            met = '-' + name[28:30]
        met = float(met) / 10
        if name[1] == 'p':
            alpha = name[2:4]
        else:
            alpha = '-' + name[2:4]
        alpha = float(alpha) / 10
        if name[5] == 'p':
            carb = name[6:8]
        else:
            carb = '-' + name[6:8]
        carb = float(carb) / 10
        if name[9] == 'p':
            nitro = name[10:12]
        else:
            nitro = '-' + name[10:12]
        nitro = float(nitro) / 10
        logg = name[24:26]
        logg = float(logg) / 10
        mets.append(met)
        loggs.append(logg)
        temps.append(temp)
        alphas.append(alpha)
        carbs.append(carb)
        nitros.append(nitro)
    tablefilecols = table.columns
    paramcol = pyf.ColDefs([
        pyf.Column(name='A/M', format='D', array=alphas),
        pyf.Column(name='C/M', format='D', array=carbs),
        pyf.Column(name='N/M', format='D', array=nitros),
        pyf.Column(name='TEMP', format='D', array=temps),
        pyf.Column(name='LOGG', format='D', array=loggs),
        pyf.Column(name='MET', format='D', array=mets)])
    newhdu = pyf.new_table(tablefilecols + paramcol)
    newhdu.writeto('ews_params.fits')
def createNewTimeline(timeline_hdu, t0, t1):
    """Create a TIMELINE table for the output table.

    Parameters
    ----------
    timeline_hdu: pyfits BinTableHDU object, or None
        The TIMELINE table from the input file (may be None).

    t0: float
        Time at the start of the interval.

    t1: float
        Time at the end of the interval.

    Returns
    -------
    pyfits BinTableHDU object, or None
        A TIMELINE table to append to the output file, or None.  If there is
        no TIMELINE extension in the input file (indicated by timeline_hdu
        being None) or if the time increment is zero or negative, None will
        be returned.  Otherwise, the returned value will have the same
        columns as the input timeline_hdu, but the rows will be a subset of
        timeline_hdu.
    """

    if timeline_hdu is None:
        return None

    cd = timeline_hdu.columns
    in_data = timeline_hdu.data
    if in_data is None:
        in_nrows = 0
    else:
        in_nrows = len(in_data)

    if in_nrows > 0:
        time_col = in_data.field("time").astype(np.float64)
        # "ceil(t1) + 0.1" here is to ensure that the time range
        # (specifically i_end) actually includes all the relevant rows
        # of the input TIMELINE table.
        # This implicitly assumes that the time increment is one second.
        (i_start, i_end) = ccos.range(time_col, t0, math.ceil(t1) + 0.1)
        out_nrows = i_end - i_start
    else:
        out_nrows = 0

    out_timeline_hdu = fits.new_table(cd, header=timeline_hdu.header,
                                      nrows=out_nrows)
    if in_nrows > 0:
        out_data = out_timeline_hdu.data
        i = i_start
        for j in range(out_nrows):
            out_data[j] = in_data[i]
            i += 1

    return out_timeline_hdu
def ASCIItoFITS(infile, comment='#'):
    """
    ASCIItoFITS(infile, [comment='#'])

    Read an ASCII file, infile, and get column names from the first line,
    which begins with the 'comment' character.

    Output will be in infile+'.FITS'
    """
    ### Get first header line and count commented lines to skip
    file = open(infile, 'r')
    line0 = file.readline()
    line = line0
    hskip = 0
    while line.startswith(comment):
        line = file.readline()
        hskip += 1
    file.close()

    #### Read data file
    data = np.loadtxt(infile, comments=comment)

    #### clean up special characters from header
    line0 = line0.replace('.', 'p')
    line0 = line0.replace('-', '_')
    line0 = line0.replace('(', '')
    line0 = line0.replace(')', '')
    line0 = line0.replace('-', '_')

    #### Make output FITS table
    header = string.split(line0[1:-1])
    # make_struct='str = {'
    go_ColDefs = 'cols=pyfits.ColDefs(['
    for i in range(header.__len__()):
        col_string = 'col%d = pyfits.Column(name=\'%s\',' % (i, header[i]) + \
                     ' format=\'%s\', array=data[0:,%d])' % (columnFormat(header[i]), i)
        exec(col_string)
        go_ColDefs += 'col%d,' % (i)
        # make_struct += '\'%s\':data[0:,%d],' %(header[i],i)

    exec(go_ColDefs[:-1] + '])')  # cols=pyfits.ColDefs([col1, col2, ...])

    #### Initialize table
    tbhdu = pyfits.new_table(cols)

    #### Primary HDU
    hdu = pyfits.PrimaryHDU()
    thdulist = pyfits.HDUList([hdu, tbhdu])

    #### Add modification time of "infile" to FITS header
    infile_mod_time = time.strftime("%m/%d/%Y %I:%M:%S %p",
                                    time.localtime(os.path.getmtime(infile)))
    thdulist[1].header.update('MODTIME', infile_mod_time)
    thdulist.writeto(infile + '.FITS', clobber=True)

    return tbhdu.data, tbhdu.columns
def make_scale(self, scale_file):
    """Make fits extension containing time dependent CTE scaling.

    The input file should have two columns with the following format::

        MJD    SCALE
        float  float
        ...    ...

    Columns beginning with # are ignored.

    Parameters
    ----------
    scale_file : str
        Text file containing time dependent CTE scaling parameters.

    """
    if not os.path.isfile(scale_file):
        raise IOError('Invalid scale file: {0:s}'.format(str(scale_file)))

    lRange, colName, colData, colForm, colUnit = 0, {}, {}, {}, {}

    # read in dtde data from text file
    with open(scale_file) as fin:
        for line in fin:
            # skip comments
            if line.startswith('#'):
                continue

            row = line.split()

            # column names
            if row[0] == 'MJD':
                colRange = range(len(row))
                for i in colRange:
                    colName[i] = row[i]
                    colData[i] = []
            # data
            else:
                for i in colRange:
                    colData[i].append(row[i])

    # convert data to numpy arrays
    colData[0] = np.array(colData[0], dtype=np.float32)
    colForm[0] = 'E'
    colUnit[0] = 'DAYS'

    colData[1] = np.array(colData[1], dtype=np.float32)
    colForm[1] = 'E'
    colUnit[1] = 'FRACTION'

    c0 = fits.Column(name=colName[0], format=colForm[0], unit=colUnit[0], array=colData[0])
    c1 = fits.Column(name=colName[1], format=colForm[1], unit=colUnit[1], array=colData[1])

    self.scale = fits.new_table(fits.ColDefs([c0, c1]))
    self.scale.header['EXTNAME'] = 'CTE_SCALE'
    self.scale.header['DATAFILE'] = (os.path.basename(scale_file), 'data source file')
def OutputFitsFileExtensions(column_dicts, template, outfilename, mode="append", headers_info=[]):
    """
    Function to output a fits file

    column_dict is a dictionary where the key is the name of the column
      and the value is a numpy array with the data. Example of a column
      would be the wavelength or flux at each pixel

    template is the filename of the template fits file. The header will
      be taken from this file and used as the main header

    mode determines how the outputted file is made. Append will just add
      a fits extension to the existing file (and then save it as outfilename)
      "new" mode will create a new fits file.

    header_info takes a list of lists. Each sub-list should have size 2 where
      the first element is the name of the new keyword, and the second element
      is the corresponding value. A 3rd element may be added as a comment
    """

    # Get header from template. Use this in the new file
    if mode == "new":
        header = pyfits.getheader(template)

    if not isinstance(column_dicts, list):
        column_dicts = [column_dicts, ]
    if len(headers_info) < len(column_dicts):
        for i in range(len(column_dicts) - len(headers_info)):
            headers_info.append([])

    # Generate the hdu list
    if mode == "append":
        hdulist = pyfits.open(template)
    elif mode == "new":
        header = pyfits.getheader(template)
        pri_hdu = pyfits.PrimaryHDU(header=header)
        hdulist = pyfits.HDUList([pri_hdu, ])

    # Make a fits binary table with the column data
    for i in range(len(column_dicts)):
        column_dict = column_dicts[i]
        header_info = headers_info[i]
        columns = []
        for key in column_dict.keys():
            columns.append(pyfits.Column(name=key, format="D", array=column_dict[key]))
        cols = pyfits.ColDefs(columns)
        tablehdu = pyfits.new_table(cols)

        # Add keywords to extension header
        num_keywords = len(header_info)
        header = tablehdu.header
        for i in range(num_keywords):
            info = header_info[i]
            if len(info) > 2:
                header.set(info[0], info[1], info[2])
            elif len(info) == 2:
                header.set(info[0], info[1])

        hdulist.append(tablehdu)

    # Output to file
    hdulist.writeto(outfilename, clobber=True, output_verify='ignore')
    hdulist.close()
def write(self, outname=None, clobber=False):
    """ Write lightcurve out to FITS file

    Parameters
    ----------
    outname : bool or str
        Either True/False, or output name

    clobber : bool
        Allow overwriting of existing file with same name

    """

    if isinstance(outname, str):
        self.outname = outname

    hdu_out = pyfits.HDUList(pyfits.PrimaryHDU())

    try:
        hdu_out[0].header = self.hdu[0].header
    except AttributeError:
        pass

    hdu_out[0].header['GEN_DATE'] = (str(datetime.now()), 'Creation Date')
    hdu_out[0].header['LC_VER'] = (__version__, 'lightcurve version used')
    hdu_out[0].header['AP_VER'] = (astropy.__version__, 'Astropy version used')
    hdu_out[0].header['NP_VER'] = (np.__version__, 'Numpy version used')
    hdu_out[0].header['SP_VER'] = (scipy.__version__, 'Scipy version used')

    bins_col = pyfits.Column('bins', 'D', 'second', array=self.bins)
    times_col = pyfits.Column('times', 'D', 'second', array=self.times)
    mjd_col = pyfits.Column('mjd', 'D', 'MJD', array=self.mjd)
    gross_col = pyfits.Column('gross', 'D', 'counts', array=self.gross)
    counts_col = pyfits.Column('counts', 'D', 'counts', array=self.counts)
    net_col = pyfits.Column('net', 'D', 'counts/s', array=self.net)
    flux_col = pyfits.Column('flux', 'D', 'ergs/s', array=self.flux)
    flux_error_col = pyfits.Column('flux_error', 'D', 'ergs/s', array=self.flux_error)
    bkgnd_col = pyfits.Column('background', 'D', 'cnts', array=self.background)
    error_col = pyfits.Column('error', 'D', 'counts', array=self.error)

    tab = pyfits.new_table([bins_col, times_col, mjd_col, gross_col, counts_col,
                            net_col, flux_col, flux_error_col, bkgnd_col, error_col])

    hdu_out.append(tab)

    if outname.endswith('.gz'):
        print("Nope, can't write to gzipped files")
        self.outname = self.outname[:-3]

    hdu_out.writeto(self.outname, clobber=clobber)
def stitch(arqs, novo_arq):
    lct = []
    bjd = []
    hdu_out = abrir(arqs[0])
    nrows1 = hdu_out[1].data.shape[0]
    head0 = hdu_out[0].header
    head1 = hdu_out[1].header
    nfiles = 0
    for arq in arqs:
        hdu_in = abrir(arq)
        if nfiles > 0:
            nrows2 = hdu_in[1].data.shape[0]
            nrows = nrows1 + nrows2
            tbl = fits.new_table(hdu_out[1].columns, nrows=nrows)
            for name in hdu_out[1].columns.names:
                try:
                    tbl.data.field(name)[nrows1:] = hdu_in[1].data.field(name)
                except:
                    pass
            hdu_out[1] = tbl
            hdu_out[0].header = head0
            hdu_out[1].header = head1
            nrows1 = nrows
        version = 1.0
        ini = getcol(hdu_in[1], 'lc_start')
        fim = getcol(hdu_in[1], 'lc_end')
        try:
            startbjd = hdu_in[1].header['startbjd']
        except:
            startbjd = getcol(hdu_in[1], 'tstart')
            version = 2.0
        try:
            endbjd = hdu_in[1].header['endbjd']
        except:
            endbjd = getcol(hdu_in[1], 'tstop')
            version = 2.0
        lct.append(ini)
        lct.append(fim)
        bjd.append(startbjd)
        bjd.append(endbjd)
        fechar(hdu_in)
        nfiles += 1
    ini = min(lct)
    fim = max(lct)
    startbjd = min(bjd)
    endbjd = max(bjd)
    hdu_out.header.update('lc_start', ini)
    hdu_out.header.update('lc_end', fim)
    if version == 1.0:
        hdu_out.header.update('startbjd', startbjd)
        hdu_out.header.update('endbjd', endbjd)
    if version == 2.0:
        hdu_out.header.update('tstart', startbjd)
        hdu_out.header.update('tstop', endbjd)
    hdu_out.writeto(novo_arq)
    fechar(hdu_out)
def _makeTableHDU(data):
    """ Create new_table object for ASN table, including definitions for
        optional Offset/Rotation columns.
    """
    # Compute maximum length of MEMNAME for table column definition
    _maxlen = 0
    for _fname in data['name']:
        if len(_fname) > _maxlen:
            _maxlen = len(_fname)
    # Enforce a minimum size of 24
    if _maxlen < 24:
        _maxlen = 24
    namelen_str = str(_maxlen + 2) + 'A'

    # Column definitions use the FITS Table TFORM value for the format
    col1 = pyfits.Column(name='MEMNAME', format=namelen_str,
                         array=N.char.array(data['name']))
    col2 = pyfits.Column(name='MEMTYPE', format='14A',
                         array=N.char.array(data['mtype']))
    col3 = pyfits.Column(name='MEMPRSNT', format='L',
                         array=N.array(data['mprsnt']).astype(N.uint8))

    # Build columns for optional Offset/Rotation columns
    #col4,col5,col6 = _makeOffsetColumns(numarray.array(data['xsh']),
    #                                    numarray.array(data['ysh']),
    #                                    numarray.array(data['rot']))
    xsh = pyfits.Column(name='XOFFSET', format='E', unit=data['units'],
                        array=N.array(data['xsh']))
    ysh = pyfits.Column(name='YOFFSET', format='E', unit=data['units'],
                        array=N.array(data['ysh']))
    rot = pyfits.Column(name='ROTATION', format='E', unit='degrees',
                        array=N.array(data['rot']))
    dx = pyfits.Column(name='XDELTA', format='E', unit=data['units'],
                       array=N.array(data['dx']))
    dy = pyfits.Column(name='YDELTA', format='E', unit=data['units'],
                       array=N.array(data['dy']))
    scl = pyfits.Column(name='SCALE', format='E', unit='',
                        array=N.array(data['scl']))

    hdu = pyfits.new_table([col1, col2, col3, xsh, ysh, dx, dy, rot, scl],
                           nrows=len(data['name']))
    return hdu
def write_weights(roi, ft1files, colnames=['WEIGHT', 'TOTAL'], emin=100,
                  which=0, subset_method=True):
    """ Write the signal and the total rates using the current state."""
    from astropy.io import fits as pyfits

    if not hasattr(ft1files, '__iter__'):
        ft1files = [ft1files]

    d = dict()
    for ft1 in ft1files:
        f = pyfits.open(ft1)
        keys = ['ENERGY', 'CONVERSION_TYPE', 'RA', 'DEC']
        for key in keys:
            d[key] = np.asarray(f[1].data.field(key),
                                dtype=float if key != 'CONVERSION_TYPE' else int)
        mask = d['ENERGY'] > emin
        for key in keys:
            d[key] = d[key][mask]

        if not subset_method:
            pre_signals, pre_backs = calc_ratios(roi, d, which=which)
        else:
            pre_signals, pre_backs = calc_ratios2(roi, d, which=which)

        signals = np.empty(len(mask))
        backs = np.empty(len(mask))
        signals[~mask] = 0
        backs[~mask] = 1
        signals[mask] = pre_signals
        backs[mask] = pre_backs

        d = f[1].data.names
        cols = []
        if (colnames[0] in d):
            f[1].data.field(colnames[0])[:] = signals / (signals + backs)
        else:
            cols += [pyfits.Column(name=colnames[0], format='E',
                                   array=signals / (signals + backs))]
        if (colnames[1] in d):
            f[1].data.field(colnames[1])[:] = signals + backs
        else:
            cols += [pyfits.Column(name=colnames[1], format='E',
                                   array=signals + backs)]
        if len(cols) > 0:
            tbhdu = pyfits.new_table(f[1].columns.data + cols, header=f[1].header)
            f[1] = tbhdu
        f.writeto(ft1, clobber=True)
        f.close()
def generate_test_files(outname='test_corrtag_a.fits', epsilon=1):
    hdu_out = pyfits.HDUList(pyfits.PrimaryHDU())

    hdu_out[0].header.update('detector', 'FUV')
    hdu_out[0].header.update('segment', 'FUVA')

    image = np.zeros((1024, 16384))
    image[100] = 1

    y_coords, x_coords = np.where(image > 0)
    n_events = len(y_coords)
    exptime = 100.0

    time_col = pyfits.Column('time', 'D', 'time', array=np.linspace(0, exptime, n_events))
    rawx_col = pyfits.Column('rawx', 'I', 'MJD', array=x_coords)
    rawy_col = pyfits.Column('rawy', 'I', 'MJD', array=y_coords)
    xcorr_col = pyfits.Column('xcorr', 'I', 'MJD', array=x_coords)
    ycorr_col = pyfits.Column('ycorr', 'I', 'MJD', array=y_coords)
    xdopp_col = pyfits.Column('xdopp', 'I', 'counts', array=x_coords)
    xfull_col = pyfits.Column('xfull', 'I', 'counts', array=x_coords)
    yfull_col = pyfits.Column('yfull', 'I', 'counts', array=y_coords)
    wavelength_col = pyfits.Column('wavelength', 'I', 'counts/s', array=np.ones(n_events) * 1200)
    epsilon_col = pyfits.Column('epsilon', 'D', 'ergs/s', array=np.ones(n_events) * epsilon)
    dq_col = pyfits.Column('dq', 'I', 'cnts', array=np.zeros(n_events))
    pha_col = pyfits.Column('pha', 'I', 'counts', array=np.ones(n_events) * 14)

    tab = pyfits.new_table([time_col, rawx_col, rawy_col, xcorr_col, ycorr_col,
                            xdopp_col, xfull_col, yfull_col, wavelength_col,
                            epsilon_col, dq_col, pha_col])
    hdu_out.append(tab)

    hdu_out[1].header.update('EXTNAME', 'EVENTS')
    hdu_out[1].header.update('exptime', exptime)
    hdu_out[1].header.update('expstart', 56000)

    for segment in ['a', 'b']:
        hdu_out[1].header.update('sp_hgt_{}'.format(segment), 20)
        hdu_out[1].header.update('sp_loc_{}'.format(segment), 100)
        hdu_out[1].header.update('b_hgt1_{}'.format(segment), 20)
        hdu_out[1].header.update('b_bkg1_{}'.format(segment), 150)
        hdu_out[1].header.update('b_hgt2_{}'.format(segment), 20)
        hdu_out[1].header.update('b_bkg2_{}'.format(segment), 50)

    hdu_out[1].header.update('sdqflags', 16)
    hdu_out.writeto(outname, clobber=True)
def writetabfits(filename, rec, units=None, overwrite=True, header=None):
    """This is deprecated. Use `writetable()` with file type '.fits' instead.

    Writes a list of numpy arrays or a structured array to a binary
    fits table. Works best with structured arrays.

    Parameters
    ----------
    filename : str
        Filename to write to.
    rec : sequence of arrays or record array
        Data to write.
    units : list of str (default None)
        Sequence of strings giving the units for each column.
    header : fits header object (default None)
        A header to copy to the primary fits extension.
    """
    try:
        import astropy.io.fits as pyfits
    except ImportError:
        import pyfits

    # Mapping from numpy dtype codes to FITS TFORM codes
    fmts = dict(f4='E', f8='E', i2='I', i4='J', i8='K', b1='L')

    try:
        rec.dtype
    except AttributeError:
        rec = np.rec.fromarrays(rec)
    if rec.dtype.names is None:
        raise ValueError('Input must be a list of columns or a '
                         'structured array')
    if units is None:
        units = [None] * len(rec.dtype.descr)

    cols = []
    for unit, name in zip(units, rec.dtype.names):
        a = rec[name]
        dtype = a.dtype.str[1:]
        if dtype.startswith('S'):
            fmt = 'A' + dtype[1:]
        else:
            fmt = fmts[dtype]
        cols.append(pyfits.Column(name=name, format=str(fmt), array=a,
                                  unit=unit))

    tbhdu = pyfits.new_table(pyfits.ColDefs(cols))
    tbhdu.writeto(filename, clobber=overwrite)

    fh = pyfits.open(filename)
    if header is not None:
        fh[0].header = header
    fh.writeto(filename, clobber=1, output_verify='silentfix')
    fh.close()
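# Hedged usage sketch for writetabfits: build a small structured array and
# write it to disk; the output filename, column names and units are
# hypothetical examples.
rec = np.rec.fromarrays(
    [np.arange(5, dtype=np.float64), np.arange(5, dtype=np.int32)],
    names=['wave', 'flag'])
writetabfits('example_table.fits', rec, units=['Angstrom', ''],
             overwrite=True)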
def to_fits(self, filename, format=None, clobber=True):
    """Write a HealpixMap to a fits file in the fits format specified by
    'format'.  Default uses the mapping of numpy types to fits format
    types stored in default_fits_format_codes."""
    if format is None:
        format = default_fits_format_codes[self.get_dtype().type]
    hdu0 = pyfits.PrimaryHDU()
    col0 = pyfits.Column(name='signal', format=format, array=self.map)
    cols = pyfits.ColDefs([col0])
    tbhdu = pyfits.new_table(cols)
    self._set_fits_header(tbhdu.header)
    hdulist = pyfits.HDUList([hdu0, tbhdu])
    hdulist.writeto(filename, clobber=clobber)
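# Hedged usage sketch: assumes `hpm` is an existing HealpixMap instance from
# the surrounding module; only the output filename is chosen here.
hpm.to_fits('healpix_signal.fits', clobber=True)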
def profileFitsTable(outpath, base_name, order_num, profile):
    prihdr = fits.Header()
    prihdr['COMMENT'] = "profile table"
    prihdu = fits.PrimaryHDU(header=prihdr)
    tbhdu = fits.new_table(
        fits.ColDefs([
            fits.Column(name='row (pix)', format='1I',
                        array=np.arange(profile.shape[0], dtype=int)),
            fits.Column(name='mean_flux (cnts)', format='1D',
                        array=profile)]))
    thdulist = fits.HDUList([prihdu, tbhdu])
    fn = constructFileName(outpath, base_name, order_num, 'profile.tbl')
    thdulist.writeto(fn, clobber=True)
    log_fn(fn)
    return
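# Hedged usage sketch for profileFitsTable; the output path, base name and
# order number are hypothetical, and the spatial profile is a toy Gaussian.
profile = np.exp(-0.5 * ((np.arange(64) - 32.0) / 4.0) ** 2)
profileFitsTable('/tmp/out', 'NS.20190101.12345', 3, profile)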
def make_new_fits_with_phase(folders=folders, path=path, instrument='pn',
                             t_transit_xmm=t_transit_xmm, period=period):
    # make new fits event files which contain orbital phase information
    if instrument == 'pn':
        instr_file_s = pn_src
        instr_file_b = pn_bg
    if instrument == 'mos1':
        instr_file_s = mos1_src
        instr_file_b = mos1_bg
    if instrument == 'mos2':
        instr_file_s = mos2_src
        instr_file_b = mos2_bg

    # for all observations:
    for i in np.arange(0, len(folders)):
        # calculate and save phase for source files:
        hdu = fits.open(path + folders[i] + '/' + instr_file_s)
        data = hdu[1].data
        hdu.close()
        # calculate orbital phase:
        phase = np.mod(data['TIME'] - t_transit_xmm, period) / period
        phase[phase < 0.5] = 1. + phase[phase < 0.5]
        # what information we need for the new file:
        # time, phase, pi (i.e. photon energy, to be able to filter later)
        col1 = fits.Column(name='TIME', format='E', array=data['TIME'])
        col2 = fits.Column(name='PHASE', format='E', array=phase)
        col3 = fits.Column(name='PI', format='I', array=data['PI'])
        cols = fits.ColDefs([col1, col2, col3])
        tbhdu = fits.new_table(cols)
        tbhdu.writeto(path + folders[i] + '/' +
                      instr_file_s.replace('.fits', '_phase.fits'),
                      clobber=True)

        # calculate and save phase for bg files:
        hdu = fits.open(path + folders[i] + '/' + instr_file_b)
        data = hdu[1].data
        hdu.close()
        # calculate orbital phase:
        phase = np.mod(data['TIME'] - t_transit_xmm, period) / period
        phase[phase < 0.5] = 1. + phase[phase < 0.5]
        # same columns as above: time, phase, pi
        col1 = fits.Column(name='TIME', format='E', array=data['TIME'])
        col2 = fits.Column(name='PHASE', format='E', array=phase)
        col3 = fits.Column(name='PI', format='I', array=data['PI'])
        cols = fits.ColDefs([col1, col2, col3])
        tbhdu = fits.new_table(cols)
        tbhdu.writeto(path + folders[i] + '/' +
                      instr_file_b.replace('.fits', '_phase.fits'),
                      clobber=True)
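# Hedged usage sketch: relies entirely on the module-level defaults
# (folders, path, t_transit_xmm, period) referenced in the signature above;
# only the instrument is selected here.
make_new_fits_with_phase(instrument='mos1')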
def saveNewGTI(self, gti):
    """Append new GTI information as a BINTABLE extension.

    Create and save a GTI extension.  If there is no GTI extension, or
    if there is only one, the new GTI will be appended as a new
    extension.  If there are already two or more GTI extensions, the
    last one (highest EXTVER) will be replaced.

    Parameters
    ----------
    gti: list of two-element lists
        A list of [start, stop] good time intervals.
    """
    len_gti = len(gti)
    col = []
    col.append(fits.Column(name="START", format="1D", unit="s"))
    col.append(fits.Column(name="STOP", format="1D", unit="s"))
    cd = fits.ColDefs(col)
    hdu = fits.new_table(cd, nrows=len_gti)
    hdu.header.update("extname", "GTI")
    outdata = hdu.data
    startcol = outdata.field("START")
    stopcol = outdata.field("STOP")
    for i in range(len_gti):
        startcol[i] = gti[i][0]
        stopcol[i] = gti[i][1]

    if self.gti_hdunum is None:
        extver = 1
        inplace = False                 # create a new GTI extension
    else:
        last_gti = self.fd[self.gti_hdunum]
        extver = last_gti.header.get("extver", 1)
        # if there are already two GTI extensions, overwrite the last one
        inplace = (extver > 1)
        if not inplace:
            extver += 1
    hdu.header.update("extver", extver)

    if inplace:
        self.fd[self.gti_hdunum] = hdu
        if self.verbose:
            print("GTI extension updated in-place")
    else:
        self.fd.append(hdu)
        self.fd[0].header.update("nextend", len(self.fd) - 1)
        if self.verbose:
            print("New GTI extension appended")
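# Hedged usage sketch for saveNewGTI: `corrtag` stands in for whatever object
# owns this method (it needs .fd, .gti_hdunum and .verbose attributes); the
# interval values are placeholders.
corrtag.saveNewGTI([[0.0, 120.0], [150.0, 300.0]])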
def write(cls, fname, splits, data, dtres):
    def unpack(arr):
        # expand a masked array back to the full cadence length,
        # filling the gaps with NaN
        aup = np.full(data.nanmask.size, np.nan)
        aup[data.nanmask] = arr
        return aup

    C = pf.Column
    cols = [C(name='time', format='D', array=unpack(data.time)),
            C(name='cadence', format='I', array=unpack(data.cadence)),
            C(name='quality', format='I', array=unpack(data.quality)),
            C(name='x', format='D', array=unpack(data.x)),
            C(name='y', format='D', array=unpack(data.y))]

    for i in range(data.nsets):
        cols.extend([C(name='flux_%d' % (i+1), format='D',
                       array=unpack(data.fluxes[i])),
                     C(name='error_%d' % (i+1), format='D',
                       array=unpack(data.errors[i])),
                     C(name='mflags_%d' % (i+1), format='B',
                       array=unpack(data.mflags[i])),
                     C(name='trend_t_%d' % (i+1), format='D',
                       array=unpack(dtres[i].tr_time)),
                     C(name='trend_p_%d' % (i+1), format='D',
                       array=unpack(dtres[i].tr_position))])

    if pf_version >= 3.3:
        hdu = pf.BinTableHDU.from_columns(pf.ColDefs(cols))
    else:
        hdu = pf.new_table(cols)

    hdu.header['extname'] = 'k2_detrend'
    hdu.header['object'] = data.epic
    hdu.header['epic'] = data.epic
    hdu.header['splits'] = str(splits)
    for i in range(data.nsets):
        hdu.header['cdpp%dr' % (i+1)] = dtres[i].cdpp_r
        hdu.header['cdpp%dt' % (i+1)] = dtres[i].cdpp_t
        hdu.header['cdpp%dc' % (i+1)] = dtres[i].cdpp_c
        hdu.header['ap%d_warn' % (i+1)] = dtres[i].warn
    hdu.header['ker_name'] = dtres[0].detrender.kernel.name
    hdu.header['ker_pars'] = ' '.join(dtres[0].detrender.kernel.names)
    hdu.header['ker_eqn'] = dtres[0].detrender.kernel.eq
    for i in range(data.nsets):
        hdu.header['ker_hps%d' % (i+1)] = str(dtres[i].detrender.tr_pv).replace('\n', '')
    hdu.header['origin'] = 'SPLOX: Stars and Planets at Oxford'
    hdu.header['program'] = 'k2_syscor v0.8'
    hdu.header['date'] = datetime.today().strftime('%Y-%m-%dT%H:%M:%S')

    primary_hdu = pf.PrimaryHDU(header=data.sap_header)
    hdu_list = pf.HDUList([primary_hdu, hdu])
    hdu_list.writeto(fname, clobber=True)