Example No. 1
import numpy as np
from astropy.io import fits
from scipy.interpolate import interp1d

def gluespec(datadir, filelist):
    mode=[]
    date=[]
    src=[]
    # Filenames follow the SRC_STD_DATE_NUM_MODE_div.fits convention
    # written by divspec (see Example No. 14)
    for myfile in filelist:
        src.append(myfile.split('_')[0])
        date.append(myfile.split('_')[2])
        mode.append(myfile.split('_')[4])
    outfile=src[0]+'_'+date[0]+'_'+'glue.fits'
    m0bool=(np.array(mode)=='M0')
    m1bool=(np.array(mode)=='M1')
    m2bool=(np.array(mode)=='M2')
    m3bool=(np.array(mode)=='M3')
    m0files=[]
    m1files=[]
    m2files=[]
    m3files=[]
    if(m0bool.any()):
        m0files=np.array(filelist)[m0bool]
        for i,myfile in enumerate(m0files):
            m0file=datadir+myfile
            hdulist=fits.open(m0file)
            data=hdulist[1].data
            wave=data['wave']
            div=data['div']
            std=data['std']
            if(i==0): 
                wave0_m0=wave
                std_m0=std
                div_all_m0=div
            else:
                f=interp1d(wave,div)
                div_interp=f(wave0_m0)
                div_all_m0=div_all_m0+div_interp
        div_all_m0=div_all_m0/np.size(m0files)
    else:
        div_all_m0=np.array([])
        wave0_m0=np.array([])
        std_m0=np.array([])
        
    if(m1bool.any()):
        m1files=np.array(filelist)[m1bool]
        for i,myfile in enumerate(m1files):
            m1file=datadir+myfile
            hdulist=fits.open(m1file)
            data=hdulist[1].data
            wave=data['wave']
            div=data['div']
            std=data['std']
            if(i==0): 
                wave0_m1=wave
                div_all_m1=div
                std_m1=std
            else:
                f=interp1d(wave,div)
                div_interp=f(wave0_m1)
                div_all_m1=div_all_m1+div_interp
        div_all_m1=div_all_m1/np.size(m1files)
    else:
        div_all_m1=np.array([])
        wave0_m1=np.array([])
        std_m1=np.array([])

    if(m2bool.any()):
        m2files=np.array(filelist)[m2bool]
        for i,myfile in enumerate(m2files):
            m2file=datadir+myfile
            hdulist=fits.open(m2file)
            data=hdulist[1].data
            wave=data['wave']
            div=data['div']
            std=data['std']
            if(i==0): 
                wave0_m2=wave
                div_all_m2=div
                std_m2=std
            else:
                f=interp1d(wave,div)
                div_interp=f(wave0_m2)
                div_all_m2=div_all_m2+div_interp
        div_all_m2=div_all_m2/np.size(m2files)
    else:
        div_all_m2=np.array([])
        wave0_m2=np.array([])
        std_m2=np.array([])

    if(m3bool.any()):
        m3files=np.array(filelist)[m3bool]
        for i,myfile in enumerate(m3files):
            m3file=datadir+myfile
            hdulist=fits.open(m3file)
            data=hdulist[1].data
            wave=data['wave']
            div=data['div']
            std=data['std']
            if(i==0): 
                wave0_m3=wave
                std_m3=std
                div_all_m3=div
            else:
                f=interp1d(wave,div)
                div_interp=f(wave0_m3)
                div_all_m3=div_all_m3+div_interp
        div_all_m3=div_all_m3/np.size(m3files)
    else:
        div_all_m3=np.array([])
        wave0_m3=np.array([])
        std_m3=np.array([])

    wave=np.concatenate([wave0_m0,wave0_m1,wave0_m2,wave0_m3])
    div=np.concatenate([div_all_m0,div_all_m1,div_all_m2,div_all_m3])
    std=np.concatenate([std_m0,std_m1,std_m2,std_m3])

#Create columns                                                                                                             
    c1  = fits.Column(name='wave', format='D', array=wave)
    c2  = fits.Column(name='div', format='D', array=div)
    c3  = fits.Column(name='std', format='D', array=std)
    coldefs = fits.ColDefs([c1,c2,c3])
    tbhdu = fits.BinTableHDU.from_columns(coldefs)
    primary_hdu = fits.PrimaryHDU()
    hdul = fits.HDUList([primary_hdu, tbhdu])
#Write to fits file                                                                                                         
    hdul.writeto(outfile,overwrite=True)
    print('Writing to: ', outfile)
    
    return (wave,div)
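A minimal usage sketch, assuming a directory of `_div.fits` files that follow the `SRC_STD_DATE_NUM_MODE_div.fits` naming convention the index-based splits above rely on; the directory and file names here are hypothetical:

filelist = ['ABAur_HD31296_20190117_0001_M0_div.fits',
            'ABAur_HD31296_20190117_0002_M1_div.fits']
wave, div = gluespec('/path/to/divspec/output/', filelist)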
Example No. 2
def light_curve_fits(num_samples, num_energies):
    """
    Generate light curve FITS structures given number of samples (times) and number of energies.

    Parameters
    ----------
    num_samples : int
        Number of samples
    num_energies : int
        Number of energies

    Returns
    -------
    astropy.io.fits.HDUList
        HDU list, primary and binary extensions data, energy, control.
    """
    control_columns = (fits.Column(name='INTEGRATION_TIME',
                                   unit='0.1s',
                                   format='I'),
                       fits.Column(name='DETECTOR_MASK',
                                   format='32B',
                                   array=np.zeros(1)),
                       fits.Column(name='PIXEL_MASK',
                                   format='12B',
                                   array=np.zeros(1)),
                       fits.Column(name='ENERGY_BIN_MASK',
                                   format='33B',
                                   array=np.zeros(1)),
                       fits.Column(name='COMPRESSION_SCHEME_COUNTS_SKM',
                                   format='3I',
                                   array=np.zeros((1, 3))),
                       fits.Column(name='COMPRESSION_SCHEME_TRIGGERS_SKM',
                                   format='3I',
                                   array=np.zeros((1, 3))))

    control_coldefs = fits.ColDefs(control_columns)
    control_hdu = fits.BinTableHDU.from_columns(control_coldefs)
    control_hdu.name = 'CONTROL'

    data_columns = (fits.Column(name='COUNTS',
                                format=f'{num_energies}J',
                                array=np.zeros((num_samples, num_energies))),
                    fits.Column(name='TRIGGERS',
                                format='J',
                                array=np.zeros(num_samples)),
                    fits.Column(name='RATE_CONTROL_REGIME',
                                format='B',
                                array=np.zeros(num_samples)),
                    fits.Column(name='CHANNEL',
                                format=f'{num_energies}B',
                                array=np.zeros(num_samples)),
                    fits.Column(name='TIME',
                                format='D',
                                array=np.zeros(num_samples)),
                    fits.Column(name='TIMEDEL',
                                format='E',
                                array=np.zeros(num_samples)),
                    fits.Column(name='LIVETIME',
                                format='J',
                                array=np.zeros(num_samples)),
                    fits.Column(name='ERROR',
                                format='J',
                                array=np.zeros(num_samples)))

    light_curve_hdu_list = _create_hdul(control_hdu, data_columns,
                                        num_energies)
    return light_curve_hdu_list
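`_create_hdul` is a module-private helper that is not part of this snippet. A rough, hypothetical reconstruction of the assembly it presumably performs; the `RATE` extension name is an assumption, and `num_energies` is kept only for signature compatibility:

def _create_hdul(control_hdu, data_columns, num_energies):
    # Hypothetical sketch: bundle the data columns into a binary table
    # and return primary + data + control extensions.
    data_hdu = fits.BinTableHDU.from_columns(fits.ColDefs(data_columns))
    data_hdu.name = 'RATE'  # assumed extension name
    return fits.HDUList([fits.PrimaryHDU(), data_hdu, control_hdu])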
Example No. 3
    def run(self):
        """
        Main routine. This stage:
        - Creates gamma1, gamma2 maps and corresponding masks from
          the reduced catalog for a set of redshift bins.
        - Stores the above into a single FITS file.
        """
        logger.info("Reading masked fraction from {}.".format(self.get_input("masked_fraction")))
        self.fsk, _ = read_flat_map(self.get_input("masked_fraction"))
        self.nbins = len(self.config['pz_bins'])-1

        logger.info("Reading calibrated shear catalog from {}.".format(self.get_input('clean_catalog')))
        hdul = fits.open(self.get_input('clean_catalog'))
        head_cat = hdul[0].header
        mhats = np.array([head_cat['MHAT_%d' % (ibin+1)]
                          for ibin in range(self.nbins)])
        resps = np.array([head_cat['RESPONS_%d' % (ibin+1)]
                          for ibin in range(self.nbins)])
        cat = hdul[1].data
        # Remove masked objects
        if self.config['mask_type'] == 'arcturus':
            self.msk = cat['mask_Arcturus'].astype(bool)
        elif self.config['mask_type'] == 'sirius':
            self.msk = np.logical_not(cat['iflags_pixel_bright_object_center'])
            self.msk *= np.logical_not(cat['iflags_pixel_bright_object_any'])
        else:
            raise KeyError("Mask type "+self.config['mask_type'] +
                           " not supported. Choose arcturus or sirius")
        self.msk *= cat['wl_fulldepth_fullcolor']
        cat = cat[self.msk]

        logger.info("Reading pdf filenames")
        data_syst = np.genfromtxt(self.get_input('pdf_matched'),
                                  dtype=[('pzname', '|U8'),
                                         ('fname', '|U256')])
        self.pdf_files = {n: fn
                          for n, fn in zip(np.atleast_1d(data_syst['pzname']),
                                           np.atleast_1d(data_syst['fname']))}

        logger.info("Getting COSMOS N(z)s")
        pzs_cosmos = self.get_nz_cosmos()

        logger.info("Getting pdf stacks")
        pzs_stack = {}
        for n in self.pdf_files.keys():
            pzs_stack[n] = self.get_nz_stack(cat, n)

        logger.info("Computing e2rms.")
        e2rms = self.get_e2rms(cat)

        logger.info("Computing w2e2.")
        w2e2 = self.get_w2e2(cat)

        logger.info("Creating shear maps and corresponding masks.")
        gammamaps = self.get_gamma_maps(cat)

        logger.info("Writing output to {}.".format(self.get_output('gamma_maps')))
        header = self.fsk.wcs.to_header()
        hdus = []
        shp_mp = [self.fsk.ny, self.fsk.nx]
        for im, m_list in enumerate(gammamaps):
            # Maps
            head = header.copy()
            head['DESCR'] = ('gamma1, bin %d' % (im+1),
                             'Description')
            if im == 0:
                hdu = fits.PrimaryHDU(data=m_list[0][0].reshape(shp_mp),
                                      header=head)
            else:
                hdu = fits.ImageHDU(data=m_list[0][0].reshape(shp_mp),
                                    header=head)
            hdus.append(hdu)
            head = header.copy()
            head['DESCR'] = ('gamma2, bin %d' % (im+1), 'Description')
            hdu = fits.ImageHDU(data=m_list[0][1].reshape(shp_mp),
                                header=head)
            hdus.append(hdu)
            head = header.copy()
            head['DESCR'] = ('gamma weight mask, bin %d' % (im+1),
                             'Description')
            hdu = fits.ImageHDU(data=m_list[1][0].reshape(shp_mp),
                                header=head)
            hdus.append(hdu)
            head['DESCR'] = ('gamma binary mask, bin %d' % (im+1),
                             'Description')
            hdu = fits.ImageHDU(data=m_list[1][1].reshape(shp_mp),
                                header=head)
            hdus.append(hdu)
            head['DESCR'] = ('counts map (shear sample), bin %d' % (im+1),
                             'Description')
            hdu = fits.ImageHDU(data=m_list[1][2].reshape(shp_mp),
                                header=head)
            hdus.append(hdu)

            cols = [fits.Column(name='z_i', array=pzs_cosmos[im, 0, :],
                                format='E'),
                    fits.Column(name='z_f', array=pzs_cosmos[im, 1, :],
                                format='E'),
                    fits.Column(name='nz_cosmos', array=pzs_cosmos[im, 2, :],
                                format='E'),
                    fits.Column(name='enz_cosmos', array=pzs_cosmos[im, 3, :],
                                format='E')]
            for n in self.pdf_files.keys():
                cols.append(fits.Column(name='nz_'+n,
                                        array=pzs_stack[n][im, 2, :],
                                        format='E'))
            hdus.append(fits.BinTableHDU.from_columns(cols))
        # e2rms
        cols = [fits.Column(name='e2rms', array=e2rms, format='2E'),
                fits.Column(name='w2e2', array=w2e2, format='E'),
                fits.Column(name='mhats', array=mhats, format='E'),
                fits.Column(name='resps', array=resps, format='E')]
        hdus.append(fits.BinTableHDU.from_columns(cols))

        hdulist = fits.HDUList(hdus)
        hdulist.writeto(self.get_output('gamma_maps'), overwrite=True)


        # Plotting
        for im, m_list in enumerate(gammamaps):
            plot_map(self.config, self.fsk, m_list[0][0], 'gamma1_%d' % im)
            plot_map(self.config, self.fsk, m_list[0][1], 'gamma2_%d' % im)
            plot_map(self.config, self.fsk, m_list[1][0], 'gamma_w_%d' % im)
            plot_map(self.config, self.fsk, m_list[1][1], 'gamma_b_%d' % im)
            plot_map(self.config, self.fsk, m_list[1][2], 'gamma_c_%d' % im)
            z = 0.5 * (pzs_cosmos[im, 0, :] + pzs_cosmos[im, 1, :])
            nzs = [pzs_cosmos[im, 2, :]]
            names = ['COSMOS']
            for n in self.pdf_files.keys():
                nzs.append(pzs_stack[n][im, 2, :])
                names.append(n)
            plot_curves(self.config, 'nz_%d' % im,
                        z, nzs, names, xt=r'$z$', yt=r'$N(z)$')
        x = np.arange(self.nbins)
        plot_curves(self.config, 'mhat', x,
                    [mhats], ['m_hat'], xt='bin', yt=r'$\hat{m}$')
        plot_curves(self.config, 'resp', x,
                    [resps], ['resp'], xt='bin', yt=r'$R$')

        # Permissions on NERSC (raw strings avoid the invalid '\;' escape)
        os.system(r'find /global/cscratch1/sd/damonge/GSKY/ -type d -exec chmod -f 777 {} \;')
        os.system(r'find /global/cscratch1/sd/damonge/GSKY/ -type f -exec chmod -f 666 {} \;')
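A short sketch of reading the per-bin maps back by their DESCR keyword; the file name below is a stand-in for whatever `self.get_output('gamma_maps')` resolves to:

with fits.open('gamma_maps.fits') as hdul:
    for hdu in hdul:
        if 'DESCR' in hdu.header:
            print(hdu.header['DESCR'])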
Example No. 4
import os
import re
import glob
import time
import datetime
from numpy import loadtxt
from astropy.io import fits

def write_mod_fits(pixel, script_path, path=None, prefix=''):

  if path is None: path=""
  root=os.path.join(script_path,pixel)
  
  xbandfiles = sorted(glob.glob(root+'-*.wav'))
  band = []
  npix = []
  for entry in xbandfiles:
    match = re.search(r'-[\w]*\.wav', entry)
    if match:
      tag = match.group()[1:-4]
      band.append(tag.upper())
      x = loadtxt(root+'-'+tag+'.wav')
      npix.append(len(x))
    
  x = loadtxt(root+'.wav')
  if len(npix) == 0: npix.append(len(x))

  m=glob.glob(root+".mdl")
  e=glob.glob(root+".err")
  n=glob.glob(root+".nrd")

  fmp=glob.glob(root+".fmp.fits")  
  mdata=loadtxt(m[0])
  edata=loadtxt(e[0])
  if (len(n) > 0): 
    odata=loadtxt(n[0])
    f=glob.glob(root+".frd")
    fdata=loadtxt(f[0])
    edata=edata/fdata*odata
  else:
    odata=loadtxt(root+".frd")  

  hdu0=fits.PrimaryHDU()
  now = datetime.datetime.fromtimestamp(time.time())
  nowstr = now.isoformat() 
  nowstr = nowstr[:nowstr.rfind('.')]
  hdu0.header['DATE'] = nowstr
  hdulist = [hdu0]

  i = 0
  j1 = 0

  for entry in band:
    j2 = j1 + npix[i] 
    #print(entry,i,npix[i],j1,j2)
    #colx = fits.Column(name='wavelength',format='e8', array=array(x[j1:j2]))
    #coldefs = fits.ColDefs([colx])
    #hdu = fits.BinTableHDU.from_columns(coldefs)
    hdu = fits.ImageHDU(name=entry+'_WAVELENGTH', data=x[j1:j2])
    #hdu.header['EXTNAME']=entry+'_WAVELENGTH'
    hdulist.append(hdu)
    
    if odata.ndim == 2: tdata = odata[:,j1:j2]
    else: tdata = odata[j1:j2][None,:]
    col01 = fits.Column(name='obs',format=str(npix[i])+'e8', dim='('+str(npix[i])+')', array=tdata)
    if edata.ndim == 2: tdata = edata[:,j1:j2]
    else: tdata = edata[j1:j2][None,:]
    col02 = fits.Column(name='err',format=str(npix[i])+'e8', dim='('+str(npix[i])+')', array=tdata)
    if mdata.ndim == 2: tdata = mdata[:,j1:j2]
    else: tdata = mdata[j1:j2][None,:]
    col03 = fits.Column(name='fit',format=str(npix[i])+'e8', dim='('+str(npix[i])+')', array=tdata)    
    coldefs = fits.ColDefs([col01,col02,col03])
    hdu=fits.BinTableHDU.from_columns(coldefs, name=entry+'_MODEL')
    #hdu = fits.ImageHDU(name=entry+'_MODEL', data=stack([odata[:,j1:j2],edata[:,j1:j2],mdata[:,j1:j2]]) ) 
    #hdu.header['EXTNAME']=entry+'_MODEL'
    hdulist.append(hdu)
    i += 1
    j1 = j2

  if len(fmp) > 0:
    ff=fits.open(fmp[0])
    fibermap=ff[1]
    hdu=fits.BinTableHDU.from_columns(fibermap.columns, name='FIBERMAP')
    #hdu.header['EXTNAME']='FIBERMAP'
    hdulist.append(hdu)

  hdul=fits.HDUList(hdulist)
  if prefix == '':
    spmod_name=os.path.join(path,'spmod-64-'+pixel+'.fits')
  else:
    spmod_name=os.path.join(path,'spmod_'+prefix)
  hdul.writeto(spmod_name)
  
  return None
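Hypothetical usage, assuming `<pixel>.wav`, `.mdl`, `.err` (and optionally `.nrd`/`.frd`) products sit under `script_path`; the pixel name and paths below are made up:

write_mod_fits('1234', '/scratch/jobs', path='output')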
Example No. 5
def variance_fits(num_samples, num_energies):
    """
    Generate variance FITS structures given number of samples (times) and number of energies.

    Parameters
    ----------
    num_samples : int
        Number of samples
    num_energies : int
        Number of energies

    Returns
    -------
    astropy.io.fits.HDUList
        HDU list, primary and binary extensions data, energy, control.
    """
    control_columns = (
        fits.Column(name='INTEGRATION_TIME', format='I'),
        fits.Column(name='SAMPLES', format='I'),
        fits.Column(name='DETECTOR_MASK', format='32B', array=np.zeros(1)),
        fits.Column(name='PIXEL_MASK', format='12B', array=np.zeros(1)),
        fits.Column(name='ENERGY_BIN_MASK', format='32B', array=np.zeros(1)),
        fits.Column(name='COMPRESSION_SCHEME_VARIANCE-SKM',
                    format='3I',
                    array=np.zeros((1, 3))),
    )

    control_coldefs = fits.ColDefs(control_columns)
    control_hdu = fits.BinTableHDU.from_columns(control_coldefs)
    control_hdu.name = 'CONTROL'

    data_columns = (fits.Column(name='VARIANCE',
                                format='J',
                                array=np.zeros(num_samples)),
                    fits.Column(name='CHANNEL',
                                format=f'{num_energies}J',
                                array=np.zeros(num_samples)),
                    fits.Column(name='TIME',
                                format='D',
                                array=np.zeros(num_samples)),
                    fits.Column(name='TIMEDEL',
                                format='E',
                                array=np.zeros(num_samples)),
                    fits.Column(name='LIVETIME',
                                format='I',
                                array=np.zeros(num_samples)),
                    fits.Column(name='ERROR',
                                format='J',
                                array=np.zeros(num_samples)))

    variance_hdu_list = _create_hdul(control_hdu, data_columns, num_energies)
    return variance_hdu_list
Example No. 6
    def package_as_fits(self, fname=None):
        ''' ---------------------------------------------------------------
        Packages the KPI data structure into a multi-extension FITS,
        that may be written to disk. Returns a hdu list.

        Parameters:
        ----------

        - fname: a file name. If provided, the hdulist is saved as a
        fits file.
        --------------------------------------------------------------- '''

        # prepare the data for fits table format
        # --------------------------------------
        xy1 = fits.Column(name='XXC', format='D', array=self.VAC[:, 0])
        xy2 = fits.Column(name='YYC', format='D', array=self.VAC[:, 1])
        trm = fits.Column(name='TRM', format='D', array=self.TRM)

        uv1 = fits.Column(name='UUC', format='D', array=self.UVC[:, 0])
        uv2 = fits.Column(name='VVC', format='D', array=self.UVC[:, 1])
        uv3 = fits.Column(name='RED', format='D', array=self.RED)

        # make up a primary HDU
        # ---------------------
        hdr = fits.Header()
        hdr['SOFTWARE'] = 'XARA'
        hdr['KPI-ID'] = self.name[:8]
        hdr['GRID'] = (False, "True for integer grid mode")
        hdr['G-STEP'] = (0.0, "Used for integer grid mode")
        hdr.add_comment("File created by the XARA python pipeline")
        try:
            _ = self.BMAX
            hdr.add_comment("Model filtering baselines > %.1f meters" %
                            (self.BMAX))
        except AttributeError:
            hdr.add_comment("Discrete model is complete")
        pri_hdu = fits.PrimaryHDU(header=hdr)

        # APERTURE HDU
        # ------------
        tb1_hdu = fits.BinTableHDU.from_columns([xy1, xy2, trm])
        tb1_hdu.header['EXTNAME'] = 'APERTURE'
        tb1_hdu.header['TTYPE1'] = ('XXC',
                                    'Virtual aperture x-coord (in meters)')
        tb1_hdu.header['TTYPE2'] = ('YYC',
                                    'Virtual aperture y-coord (in meters)')
        tb1_hdu.header['TTYPE3'] = (
            'TRM', 'Virtual aperture transmission (0 < t <=1)')

        # UV-PLANE HDU
        # ------------
        tb2_hdu = fits.BinTableHDU.from_columns([uv1, uv2, uv3])
        tb2_hdu.header['TTYPE1'] = ('UUC', 'Baseline u coordinate (in meters)')
        tb2_hdu.header['TTYPE2'] = ('VVC', 'Baseline v coordinate (in meters)')
        tb2_hdu.header['TTYPE3'] = ('RED', 'Baseline redundancy (float)')
        tb2_hdu.header['EXTNAME'] = 'UV-PLANE'

        # KER-MAT HDU
        # -----------
        kpm_hdu = fits.ImageHDU(self.KerPhi)
        kpm_hdu.header.add_comment("Kernel-phase Matrix")
        kpm_hdu.header['EXTNAME'] = 'KER-MAT'

        # BLM-MAT HDU
        # -----------
        # BLM = (np.diag(self.RED).dot(self.TFM))#.astype(np.int)
        blm_hdu = fits.ImageHDU(self.BLM)
        blm_hdu.header.add_comment("Baseline Mapping Matrix")
        blm_hdu.header['EXTNAME'] = 'BLM-MAT'

        # compile HDU list and save
        # -------------------------

        self.hdul = fits.HDUList([pri_hdu, tb1_hdu, tb2_hdu, kpm_hdu, blm_hdu])

        if fname is not None:
            self.hdul.writeto(fname, overwrite=True)
        return (self.hdul)
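Usage sketch, assuming `kpi` is an instance of the class this method belongs to; the output file name is arbitrary:

hdul = kpi.package_as_fits(fname='kpi_model.fits')
hdul.info()  # should list APERTURE, UV-PLANE, KER-MAT and BLM-MAT extensions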
Example No. 7
def write_data_header(col1_data,
                      col2_data,
                      col1_label='ENERGY',
                      col2_label='RATE'):

    # Write data into fits columns
    c1 = fits.Column(name=col1_label, array=col1_data, format='J')
    c2 = fits.Column(name=col2_label, array=col2_data, format='J')

    columns = fits.ColDefs([c1, c2])
    hdu = fits.BinTableHDU.from_columns(columns)

    # Write header key words
    hdu.header.set('XTENSION',
                   value='BINTABLE',
                   comment='binary table extension')
    hdu.header.set('BITPIX', value=8, comment='8-bit bytes')
    hdu.header.set('NAXIS', value=2, comment='2-dimensional binary table')
    hdu.header.set('NAXIS1', value=12, comment='width of table in bytes')
    hdu.header.set('NAXIS2',
                   value=len(col1_data),
                   comment='number of rows in table')
    hdu.header.set('PCOUNT', value=0, comment='size of special data area')
    hdu.header.set('GCOUNT',
                   value=1,
                   comment='one data group (required keyword)')
    hdu.header.set('TFIELDS', value=2, comment='number of fields in each row')
    hdu.header.set('TTYPE1', value=col1_label, comment='label for field   1')
    hdu.header.set('TFORM1',
                   value='J',
                   comment='data format of field: 4-byte INTEGER')
    hdu.header.set('TTYPE2', value=col2_label, comment='label for field   2')
    hdu.header.set('TFORM2',
                   value='J',
                   comment='data format of field: 4-byte INTEGER')
    hdu.header.set('EXTNAME',
                   value='SPECTRUM',
                   comment='name of this binary table extension')
    hdu.header.set('HDUCLASS',
                   value='OGIP',
                   comment='format conforms to OGIP standard')
    hdu.header.set('HDUCLAS1',
                   value='SPECTRUM',
                   comment='PHA dataset (OGIP memo OGIP-92-007)')
    hdu.header.set('HDUVERS1',
                   value='1.2.1',
                   comment='Version of format (OGIP memo OGIP-92-007a)')
    hdu.header.set('TELESCOP',
                   value='UNKNOWN ',
                   comment='mission/satellite name')
    hdu.header.set('INSTRUME',
                   value='UNKNOWN ',
                   comment='instrument/detector name')
    hdu.header.set('CHANTYPE',
                   value='PHA',
                   comment='channel type (PHA, PI etc)')
    hdu.header.set('HISTORY', value="Simulated data written in XSPEC form")
    hdu.header.set('RESPFILE',
                   value='        ',
                   comment='associated redistrib matrix filename')
    hdu.header.set('ANCRFILE',
                   value='        ',
                   comment='associated ancillary response filename')
    hdu.header.set('CORRFILE',
                   value='        ',
                   comment='associated correction filename')
    hdu.header.set('CORRSCAL',
                   value=-1.,
                   comment='correction file scaling factor')
    hdu.header.set('BACKFILE',
                   value='        ',
                   comment=' associated background filename')
    hdu.header.set('EXPOSURE', value=1., comment='exposure (in seconds)')
    hdu.header.set('TLMIN1', value=1, comment='Lowest legal channel number')
    hdu.header.set('TLMAX1',
                   value=5000,
                   comment='Highest legal channel number')
    hdu.header.set('DETCHANS',
                   value=5000,
                   comment='total number possible channels')
    hdu.header.set('POISSERR', value=True, comment='Pois. err assumed ?')
    hdu.header.set('AREASCAL', value=1., comment='area scaling factor')
    hdu.header.set('BACKSCAL',
                   value=1.,
                   comment='background file scaling factor')

    return hdu
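Hypothetical usage: wrap the returned SPECTRUM extension with a primary HDU and write an OGIP-style file. The channel and rate values below are made up, and are cast to 32-bit integers to match the 'J' column format:

import numpy as np
from astropy.io import fits

channels = np.arange(1, 101, dtype=np.int32)
rates = np.random.poisson(10, size=100).astype(np.int32)
spec_hdu = write_data_header(channels, rates)
fits.HDUList([fits.PrimaryHDU(), spec_hdu]).writeto('sim_spectrum.pha',
                                                    overwrite=True)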
Example No. 8
    def to_fits(self, header=None, **kwargs):
        """
        Convert psf table data to FITS hdu list.

        Any FITS header keyword can be passed to the function and will be
        changed in the header.

        Parameters
        ----------
        header : `~astropy.io.fits.header.Header`
            Header to be written in the fits file.

        Returns
        -------
        hdu_list : `~astropy.io.fits.HDUList`
            PSF in HDU list format.
        """
        # Set up header
        if header is None:
            from ..datasets import load_psf_fits_table
            header = load_psf_fits_table()[1].header
        header['LO_THRES'] = self.energy_thresh_lo.value
        header['HI_THRES'] = self.energy_thresh_hi.value

        for key, value in kwargs.items():
            header[key] = value

        # Set up data
        names = [
            'ENERG_LO', 'ENERG_HI', 'THETA_LO', 'THETA_HI', 'AZIMUTH_LO',
            'AZIMUTH_HI', 'ZENITH_LO', 'ZENITH_HI', 'SCALE', 'SIGMA_1',
            'AMPL_2', 'SIGMA_2', 'AMPL_3', 'SIGMA_3'
        ]
        formats = [
            '15E', '15E', '12E', '12E', '1E', '1E', '1E', '1E', '180E', '180E',
            '180E', '180E', '180E', '180E'
        ]
        data = [
            self.energy_lo, self.energy_hi, self.theta, self.theta,
            self._azimuth, self._azimuth, self._zenith, self._zenith,
            self.norms[0].flatten(), self.sigmas[0].flatten(),
            self.norms[1].flatten(), self.sigmas[1].flatten(),
            self.norms[2].flatten(), self.sigmas[2].flatten()
        ]
        units = [
            'TeV', 'TeV', 'deg', 'deg', 'deg', 'deg', 'deg', 'deg', '', 'deg',
            '', 'deg', '', 'deg'
        ]

        # Set up columns
        columns = []
        for name_, format_, data_, unit_ in zip(names, formats, data, units):
            if isinstance(data_, Quantity):
                data_ = data_.value
            columns.append(
                fits.Column(name=name_,
                            format=format_,
                            array=[data_],
                            unit=unit_))
        # Create hdu and hdu list
        prim_hdu = fits.PrimaryHDU()
        hdu = fits.BinTableHDU.from_columns(columns)
        hdu.header = header
        hdu.add_checksum()
        hdu.add_datasum()
        return fits.HDUList([prim_hdu, hdu])
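Usage sketch, assuming `psf` is an instance of the class this method belongs to. Any keyword argument is copied into the header, so TELESCOP here is an arbitrary example; `checksum=True` refreshes the CHECKSUM/DATASUM keywords on write:

hdu_list = psf.to_fits(TELESCOP='HYPOTHETICAL')
hdu_list.writeto('psf_3gauss.fits', overwrite=True, checksum=True)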
Example No. 9
def create_wcscorr(descrip=False, numrows=1, padding=0):
    """
    Return the basic definitions for a WCSCORR table.
    The dtype definitions for the string columns are set to the maximum allowed so
    that all new elements will have the same max size which will be automatically
    truncated to this limit upon updating (if needed).

    The table is initialized with rows corresponding to the OPUS solution
    for all the 'SCI' extensions.
    """

    trows = numrows + padding
    # define initialized arrays as placeholders for column data
    # TODO: I'm certain there's an easier way to do this... for example, simply
    # define the column names and formats, then create an empty array using
    # them as a dtype, then create the new table from that array.
    def_float64_zeros = np.array([0.0] * trows, dtype=np.float64)
    def_float64_ones = def_float64_zeros + 1.0
    def_float_col = {'format': 'D', 'array': def_float64_zeros.copy()}
    def_float1_col = {'format': 'D', 'array': def_float64_ones.copy()}
    def_str40_col = {
        'format': '40A',
        'array': np.array([''] * trows, dtype='S40')
    }
    def_str24_col = {
        'format': '24A',
        'array': np.array([''] * trows, dtype='S24')
    }
    def_int32_col = {
        'format': 'J',
        'array': np.array([0] * trows, dtype=np.int32)
    }

    # If more columns are needed, simply add their definitions to this list
    col_names = [('HDRNAME', def_str24_col), ('SIPNAME', def_str24_col),
                 ('NPOLNAME', def_str24_col), ('D2IMNAME', def_str24_col),
                 ('CRVAL1', def_float_col), ('CRVAL2', def_float_col),
                 ('CRPIX1', def_float_col), ('CRPIX2', def_float_col),
                 ('CD1_1', def_float_col), ('CD1_2', def_float_col),
                 ('CD2_1', def_float_col), ('CD2_2', def_float_col),
                 ('CTYPE1', def_str24_col), ('CTYPE2', def_str24_col),
                 ('ORIENTAT', def_float_col), ('PA_V3', def_float_col),
                 ('RMS_RA', def_float_col), ('RMS_Dec', def_float_col),
                 ('NMatch', def_int32_col), ('Catalog', def_str40_col)]

    # Define selector columns
    id_col = fits.Column(name='WCS_ID',
                         format='40A',
                         array=np.array(['OPUS'] * numrows + [''] * padding,
                                        dtype='S24'))
    extver_col = fits.Column(name='EXTVER',
                             format='I',
                             array=np.array(list(range(1, numrows + 1)),
                                            dtype=np.int16))
    wcskey_col = fits.Column(name='WCS_key',
                             format='A',
                             array=np.array(['O'] * numrows + [''] * padding,
                                            dtype='S'))
    # create list of remaining columns to be added to table
    col_list = [id_col, extver_col, wcskey_col]  # start with selector columns

    for c in col_names:
        cdef = copy.deepcopy(c[1])
        col_list.append(
            fits.Column(name=c[0], format=cdef['format'], array=cdef['array']))

    if descrip:
        col_list.append(
            fits.Column(name='DESCRIP',
                        format='128A',
                        array=np.array(['Original WCS computed by OPUS'] *
                                       numrows,
                                       dtype='S128')))

    # Now create the new table from the column definitions
    newtab = fits.BinTableHDU.from_columns(fits.ColDefs(col_list), nrows=trows)
    # The fact that setting .name is necessary should be considered a bug in
    # pyfits.
    # TODO: Make sure this is fixed in pyfits, then remove this
    newtab.name = 'WCSCORR'

    return newtab
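Usage sketch: build a WCSCORR table with padding rows for future updates and append it to an existing file (the file name is hypothetical):

wcscorr = create_wcscorr(descrip=True, numrows=3, padding=10)
with fits.open('image.fits', mode='update') as hdul:
    hdul.append(wcscorr)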
Example No. 10
def save_HIspec_fits(target_info,
                     savedir='.',
                     beam=1.,
                     observation='HI4PI',
                     datadir='/Volumes/YongData2TB/HI4PI'):
    '''
    Obtain the HI 21cm spectrum averaged within a given beam.

    target_info = {'NAME': target,
                   'RA': right ascension (J2000),
                   'DEC': declination (J2000),
                   'l': Galactic longitude,
                   'b': Galactic latitude,
                  }

    beam: to decide within what diameter (in deg) the HI spec is averaged.
    datadir: for LAB data, you give directory and the cube name
             e.g., datadir='/Users/Yong/Dropbox/databucket/LAB/labh_glue.fits'
             for HI4PI and GALFA-HI data, you give the directory of the cubes
             e.g., datadir='/Volumes/YongData2TB/HI4PI'
                   datadir='/Volumes/YongData2TB/GALFAHI_DR2/RC5/Wide'
    '''

    import astropy.io.fits as fits
    import numpy as np
    import os

    if observation not in ['HI4PI', 'LAB', 'GALFA-HI', 'GALFAHI']:
        logger.info('Do not recognize %s' % (observation))
        return False

    ## create the primary header for this spectra
    prihdu = create_primary_header_HI(target_info,
                                      observation=observation,
                                      beam=beam)

    ## extract HI spectra from certain HI survey, and create the fits data extension
    if observation == 'LAB':
        hivel, hispec = extract_LAB(target_info['l'],
                                    target_info['b'],
                                    beam=beam,
                                    labfile=datadir)
    else:  # for HI4PI or GALFA-HI
        hivel, hispec = extract_HI4PI_GALFAHI(target_info['RA'],
                                              target_info['DEC'],
                                              beam=beam,
                                              observation=observation,
                                              datadir=datadir)
    ## this is mostly for GALFA-HI, which only covers DEC = -1 to 38 deg.
    if type(hivel) == bool:
        return False

    col1 = fits.Column(name='VLSR', format='D', array=hivel)
    col2 = fits.Column(name='FLUX', format='D', array=hispec)
    cols = fits.ColDefs([col1, col2])
    tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.header['TUNIT1'] = 'km/s'  # unit of the VLSR column
    tbhdu.header['TUNIT2'] = 'K'     # unit of the FLUX column

    ## now save the data
    thdulist = fits.HDUList([prihdu, tbhdu])
    if os.path.isdir(savedir) is False: os.makedirs(savedir)
    obs_tag = observation.lower().replace('-', '')
    hifile = '%s/hlsp_cos-gal_%s_%s_%s_21cm_v1_h-i-21cm-spec-beam%.3fdeg.fits.gz' % (
        savedir, obs_tag, obs_tag, target_info['NAME'].lower(), beam)
    thdulist.writeto(hifile, overwrite=True)
    return hifile
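A hypothetical call, using real coordinates for Mrk 421 as an arbitrary sightline:

target_info = {'NAME': 'MRK421', 'RA': 166.114, 'DEC': 38.209,
               'l': 179.832, 'b': 65.031}
hifile = save_HIspec_fits(target_info, savedir='specs', beam=1.,
                          observation='HI4PI',
                          datadir='/Volumes/YongData2TB/HI4PI')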
Example No. 11
def extract_HI21cm(target_info, filedir='.', observation='HI4PI', beam=1.):
    '''
    Obtain the corresponding HI data for the QSO sightlines. Can be used
    to extract spectra from HI4PI (EBHIS+GASS) cubes. HI4PI has a resolution
    of 10.8 arcmin; each pixel is 3.25 arcmin.
    YZ noted on Mar 1, 2018: this func has been replaced by save_HIspec_fits,
       create_primary_header_HI, and cubes_within_beam.

    beam: to decide within what diameter (in deg) the HI spec is averaged.
    '''

    import os
    import numpy as np
    from yzGALFAHI.get_cubeinfo import get_cubeinfo
    from astropy.coordinates import SkyCoord
    import astropy.io.fits as fits
    from astropy.table import Table

    target = target_info['NAME']
    beam_radius = beam / 2.

    if observation == 'LAB':
        labfile = '/Users/Yong/Dropbox/databucket/LAB/labh_glue.fits'
        labdata = fits.getdata(labfile)
        labhdr = fits.getheader(labfile)

        gl, gb, cvel = get_cubeinfo(labhdr)
        tar_coord = SkyCoord(l=target_info['l'],
                             b=target_info['b'],
                             unit='deg',
                             frame='galactic')
        cube_coord = SkyCoord(l=gl, b=gb, unit='deg', frame='galactic')
        dist = tar_coord.separation(cube_coord)

        dist_copy = dist.value.copy()
        within_beam_2d = dist_copy <= beam_radius
        within_beam_3d = np.asarray([within_beam_2d] * cvel.size)

        labdata_copy = labdata.copy()
        labdata_copy[np.logical_not(within_beam_3d)] = np.nan

        ispec = np.nanmean(np.nanmean(labdata_copy, axis=2), axis=1)

        # save the spectrum
        prihdr = fits.Header()
        prihdr['OBS'] = observation
        prihdr.comments['OBS'] = 'See %s publication.' % (observation)
        prihdr['CREATOR'] = "YZ"
        prihdr['COMMENT'] = ("HI 21cm spectrum averaged within beam "
                             "size of %.2f deg. " % beam)
        import datetime as dt
        prihdr['DATE'] = str(dt.datetime.now())
        prihdu = fits.PrimaryHDU(header=prihdr)

        ## table
        col1 = fits.Column(name='VLSR', format='D', array=cvel)
        col2 = fits.Column(name='FLUX', format='D', array=ispec)
        cols = fits.ColDefs([col1, col2])
        tbhdu = fits.BinTableHDU.from_columns(cols)
        thdulist = fits.HDUList([prihdu, tbhdu])

        if os.path.isdir(filedir) is False: os.makedirs(filedir)
        if beam >= 1.:
            hifile = '%s/%s_HI21cm_%s_Beam%ddeg.fits.gz' % (filedir, target,
                                                            observation, beam)
        else:
            hifile = '%s/%s_HI21cm_%s_Beam%darcmin.fits.gz' % (
                filedir, target, observation, beam * 60)

        thdulist.writeto(hifile, overwrite=True)

    elif observation in ['HI4PI', 'GALFA_HI']:
        tar_coord = SkyCoord(ra=target_info['RA'],
                             dec=target_info['DEC'],
                             unit='deg')

        ### use the input ra/dec to decide which cube to explore.
        if observation == 'HI4PI':
            datadir = '/Volumes/YongData2TB/' + observation
        else:
            datadir = '/Volumes/YongData2TB/GALFAHI_DR2/DR2W_RC5/DR2W_RC5/Wide'

        clt = Table.read('%s/%s_RADEC.dat' % (datadir, observation),
                         format='ascii')

        cubefiles = []
        for ic in range(len(clt)):
            cubefile = datadir + '/' + clt['cubename'][ic] + '.fits'
            cubehdr = fits.getheader(cubefile)
            cra, cdec, cvel = get_cubeinfo(cubehdr)
            cube_coord = SkyCoord(ra=cra, dec=cdec, unit='deg')
            dist_coord = tar_coord.separation(cube_coord)

            dist = dist_coord.value
            within_beam_2d = dist <= beam_radius
            if dist[within_beam_2d].size > 0:
                cubefiles.append(cubefile)

        specs = []
        for cubefile in cubefiles:
            cubehdr = fits.getheader(cubefile)
            cra, cdec, cvel = get_cubeinfo(cubehdr)
            cube_coord = SkyCoord(ra=cra, dec=cdec, unit='deg')
            dist_coord = tar_coord.separation(cube_coord)

            dist = dist_coord.value
            within_beam_2d = dist <= beam_radius
            within_beam_3d = np.asarray([within_beam_2d] * cvel.size)

            cubedata = fits.getdata(cubefile)
            cubedata[np.logical_not(within_beam_3d)] = np.nan

            ispec = np.nanmean(np.nanmean(cubedata, axis=2), axis=1)
            specs.append(ispec)

        ispec = np.mean(np.asarray(specs), axis=0)

        # save the spectrum
        prihdr = fits.Header()
        prihdr['OBS'] = observation
        prihdr.comments['OBS'] = 'See %s publication.' % (observation)
        prihdr['CREATOR'] = "YZ"
        prihdr['COMMENT'] = ("HI 21cm spectrum averaged within beam "
                             "size of %.2f deg. " % beam)
        import datetime as dt
        prihdr['DATE'] = str(dt.datetime.now())
        prihdu = fits.PrimaryHDU(header=prihdr)

        ## table
        col1 = fits.Column(name='VLSR', format='D', array=cvel)
        col2 = fits.Column(name='FLUX', format='D', array=ispec)
        cols = fits.ColDefs([col1, col2])
        tbhdu = fits.BinTableHDU.from_columns(cols)
        thdulist = fits.HDUList([prihdu, tbhdu])

        if os.path.isdir(filedir) is False: os.makedirs(filedir)

        if beam >= 1.:
            hifile = '%s/%s_HI21cm_%s_Beam%ddeg.fits.gz' % (filedir, target,
                                                            observation, beam)
        else:
            hifile = '%s/%s_HI21cm_%s_Beam%darcmin.fits.gz' % (
                filedir, target, observation, beam * 60)
        thdulist.writeto(hifile, overwrite=True)

    else:
        logger.info('%s is not in [LAB, HI4PI, GALFA_HI]' % (observation))
        hifile = ''
    return hifile
Example No. 12
def create_fits(master_folder, data_fits, **kwargs):
    """
    Export the results to a fits file, containing the light curves and a correspondence of star to Cv and
     respective radius factor. 



    Parameters
    ----------
        master_folder:
            Path in which the data shall be stored
        data_fits
            :class:`~pyarchi.data_objects.Data.Data` object.
        kwargs


    Notes
    -----

        Data stored in the header unit of the file :
            Keyword      data
            
            method        type of mask used
            detect         tracking method 
            initial        initial detection method 
            grid           size of the background grid
            CDPP_TYPE       CDPP algorithm in use
        
       In the data unit of the file, we have each star, with the corresponding
        time, rotation angle, flux values and uncertainties
    """

    # TODO: store more info -> improve organization

    logger.info("Extracting data to FITS file")
    hdus = []
    default_path = path_finder(mode="default", **kwargs)
    try:
        hdulist = fits.open(default_path)
    except IOError:
        logger.fatal("File does not exist")
        return -1
    else:
        with hdulist:
            roll_ang = hdulist[1].data["ROLL_ANGLE"]
            mjd_time = hdulist[1].data["MJD_TIME"]

    col1 = fits.Column(name="MJD_TIME", format="E", array=mjd_time)
    col2 = fits.Column(name="Rotation", unit="deg", format="E", array=roll_ang)

    send_cols = [col1, col2]
    for star in data_fits.stars:
        send_cols.append(fits.Column(name=star.name, format="E", array=star.photom))
        send_cols.append(
            fits.Column(
                name="FLUX_ERR_{}".format(star.number),
                format="E",
                array=star.uncertainties,
            )
        )
    cols = fits.ColDefs(send_cols)

    hdr = fits.Header()
    hdr["method"] = kwargs["method"]
    hdr["detect"] = kwargs["detect_mode"]
    hdr["initial"] = kwargs["initial_detect"]

    hdr["grid"] = kwargs["grid_bg"]
    hdr["CDPPTYPE"] = kwargs["CDPP_type"]

    primary_hdu = fits.PrimaryHDU(header=hdr)
    hdus.append(primary_hdu)
    hdu = fits.BinTableHDU.from_columns(cols, name="Photometry")
    hdus.append(hdu)

    col1 = fits.Column(
        name="Star", format="B", array=[star.number for star in data_fits.stars]
    )
    col2 = fits.Column(
        name="Cv",
        format="E",
        array=[star.calculate_cdpp(data_fits.mjd_time)[0] for star in data_fits.stars],
    )
    col3 = fits.Column(
        name="Factors", format="E", array=[star.mask_factor for star in data_fits.stars]
    )
    col4 = fits.Column(
        name="Out bounds", format="E", array=[star.out_bound for star in data_fits.stars]
    )
    cols = fits.ColDefs([col1, col2, col3, col4])
    hdu = fits.BinTableHDU.from_columns(cols, name="General")
    hdus.append(hdu)

    hdul = fits.HDUList(hdus)

    hdul.writeto(os.path.join(master_folder, "pyarchi_output.fits"), overwrite=True)

    logger.info("Fit file was created")
Example No. 13
    def make_hdu(self, data, **kwargs):
        """ Builds and returns a FITs HDU with input data

        data      : The data begin stored

        Keyword arguments
        -------------------
        extname   : The HDU extension name        
        colbase   : The prefix for column names
        """
        shape = data.shape
        extname = kwargs.get('extname', self.conv.extname)

        if shape[-1] != self._npix:
            raise Exception(
                "Size of data array does not match number of pixels")
        cols = []

        if self._ipix is not None:
            cols.append(fits.Column(self.conv.idxstring, "J",
                                    array=self._ipix))

        if self.conv.convname == 'FGST_SRCMAP_SPARSE':
            nonzero = data.nonzero()
            nfilled = len(nonzero[0])
            if len(shape) == 1:
                cols.append(
                    fits.Column("PIX", "J", array=nonzero[0].astype(int)))
                cols.append(
                    fits.Column("VALUE",
                                "E",
                                array=data.flat[nonzero].astype(float).reshape(
                                    nfilled)))
            elif len(shape) == 2:
                keys = self._npix * nonzero[0] + nonzero[1]
                cols.append(
                    fits.Column("PIX", "J", array=nonzero[1].reshape(nfilled)))
                cols.append(
                    fits.Column("CHANNEL",
                                "I",
                                array=nonzero[0].reshape(nfilled)))
                cols.append(
                    fits.Column(
                        "VALUE",
                        "E",
                        array=data.flat[keys].astype(float).reshape(nfilled)))
            else:
                raise Exception("HPX.write_fits only handles 1D and 2D maps")

        else:
            if len(shape) == 1:
                cols.append(
                    fits.Column(self.conv.colname(indx=self.conv.firstcol),
                                "E",
                                array=data.astype(float)))
            elif len(shape) == 2:
                for i in range(shape[0]):
                    cols.append(
                        fits.Column(self.conv.colname(indx=i +
                                                      self.conv.firstcol),
                                    "E",
                                    array=data[i].astype(float)))
            else:
                raise Exception("HPX.write_fits only handles 1D and 2D maps")

        header = self.make_header()
        hdu = fits.BinTableHDU.from_columns(cols, header=header, name=extname)

        return hdu
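A self-contained sketch of how the FGST_SRCMAP_SPARSE encoding above round-trips for a 1-D map: only the non-zero pixels are kept in the PIX/VALUE columns. The map size here is arbitrary:

import numpy as np

npix = 12
dense = np.zeros(npix)
dense[[2, 7]] = [1.5, 3.0]
pix = dense.nonzero()[0]   # contents of the 'PIX' column
value = dense[pix]         # contents of the 'VALUE' column
restored = np.zeros(npix)
restored[pix] = value
assert np.array_equal(dense, restored)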
Example No. 14
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table
from scipy.interpolate import interp1d

def divspec(datadir, srcfile, stdfile, dtau=0, dpix=0, mode=None, plot=True):

    #Read in data for source and standard
    hdulist_src=fits.open(datadir+srcfile)
    data_src=hdulist_src[1].data
    hdr_src=hdulist_src[0].header

    hdulist_std=fits.open(datadir+stdfile)
    data_std=hdulist_std[1].data

    #Normalize spectra
    pflux_src=data_src['flux_pos']/np.median(data_src['flux_pos'])
    nflux_src=data_src['flux_neg']/np.median(data_src['flux_neg'])
    pflux_std=data_std['flux_pos']/np.median(data_std['flux_pos'])
    nflux_std=data_std['flux_neg']/np.median(data_std['flux_neg'])

    pwave_src=data_src['wave_pos']
    nwave_src=data_src['wave_neg']
    pwave_std=data_std['wave_pos']
    nwave_std=data_std['wave_neg']

    #If desired, do airmass correction of standard
    np.seterr(invalid='ignore')
    pflux_std=np.exp((1+dtau)*np.log(pflux_std))
    nflux_std=np.exp((1+dtau)*np.log(nflux_std))

    #If desired, do wavelength shift of standard
    dwave=(pwave_src[1]-pwave_src[0])*dpix    #Convert pixel shift to wavelength shift
    pwave_std=pwave_std+dwave
    nwave_std=nwave_std+dwave

    #Interpolate everything onto positive src wavelengths
    nf_src=interp1d(nwave_src,nflux_src,bounds_error=False)
    pf_std=interp1d(pwave_std,pflux_std,bounds_error=False)
    nf_std=interp1d(nwave_std,nflux_std,bounds_error=False)

    nflux_src=nf_src(pwave_src)
    pflux_std=pf_std(pwave_src)
    nflux_std=nf_std(pwave_src)

    #Divide source by standard
    pdiv=pflux_src/pflux_std
    ndiv=nflux_src/nflux_std

    #Combine positive and negative beams
    divflux=(pdiv+ndiv)/2.
    srcflux=(pflux_src+nflux_src)/2.
    stdflux=(pflux_std+nflux_std)/2.

    #Compute SNR's in desired regions
    if(mode is not None):
        if(mode=='M0'):
            xsnr1_left=4.66
            xsnr1_right=4.67
            xsnr2_left=4.685
            xsnr2_right=4.69
            xsnr3_left=4.70
            xsnr3_right=4.715
        if(mode=='M1'):
            xsnr1_left=4.73
            xsnr1_right=4.75
            xsnr2_left=4.75
            xsnr2_right=4.77
            xsnr3_left=4.77
            xsnr3_right=4.79
        if(mode=='M2'):
            xsnr1_left=4.965
            xsnr1_right=4.975
            xsnr2_left=4.985
            xsnr2_right=5.0
            xsnr3_left=5.01
            xsnr3_right=5.015
        if(mode=='M3'):
            xsnr1_left=5.04
            xsnr1_right=5.05
            xsnr2_left=5.065
            xsnr2_right=5.075
            xsnr3_left=5.09
            xsnr3_right=5.095
        w1=((pwave_src > xsnr1_left) & (pwave_src<xsnr1_right))
        w2=((pwave_src > xsnr2_left) & (pwave_src<xsnr2_right))
        w3=((pwave_src > xsnr3_left) & (pwave_src<xsnr3_right))
    else:
        w1=np.isfinite(srcflux)
        w2=w1
        w3=w1
                
    snr1=np.nanmean(divflux[w1])/np.nanstd(divflux[w1])
    snr2=np.nanmean(divflux[w2])/np.nanstd(divflux[w2])
    snr3=np.nanmean(divflux[w3])/np.nanstd(divflux[w3])
    print('SNR:', snr1,snr2,snr3)

    #Create columns
    c1  = fits.Column(name='wave', format='D', array=pwave_src)
    c2  = fits.Column(name='div', format='D', array=divflux)
    c3  = fits.Column(name='src', format='D', array=srcflux)
    c4  = fits.Column(name='std', format='D', array=stdflux)
    coldefs = fits.ColDefs([c1,c2,c3,c4])
    tbhdu = fits.BinTableHDU.from_columns(coldefs)
    primary_hdu = fits.PrimaryHDU(header=hdr_src)    
    hdul = fits.HDUList([primary_hdu, tbhdu])

    #Create output file name
    srcname=srcfile.split('_')[0]
    date=srcfile.split('_')[1]
    stdname=stdfile.split('_')[0]
    srcnum=srcfile.split('_')[2]
    if(mode is not None): 
        outfile=srcname+'_'+stdname+'_'+date+'_'+srcnum+'_'+mode+'_div.fits'
    else:
        outfile=srcname+'_'+stdname+'_'+date+'_'+srcnum+'_div.fits'

    #Write to fits file    
    hdul.writeto(outfile,overwrite=True) 
    print('Writing to: ', outfile)

    #Create astropy table
    spectrum_table = Table([pwave_src, divflux, srcflux, stdflux], names=('wave', 'div', 'src','std'),  dtype=('f8', 'f8','f8','f8'))
    spectrum_table['wave'].unit = 'micron'

    if(plot==True):
        fig=plt.figure(figsize=(14,6))
        ax1=fig.add_subplot(211)
        ax1.plot(spectrum_table['wave'],spectrum_table['src'],label='src')
        ax1.plot(spectrum_table['wave'],spectrum_table['std'],label='std')
        ax1.legend()

        ax2=fig.add_subplot(212)
        ax2.plot(spectrum_table['wave'],spectrum_table['div'],label='div')
        ax2.set_ylim(0.8,1.2)
        ax2.legend()

        if(mode is not None):
            ax1.axvline(xsnr1_left,linestyle='--',color='C2')
            ax1.axvline(xsnr1_right,linestyle='--',color='C2')
            ax1.axvline(xsnr2_left,linestyle='--',color='C2')
            ax1.axvline(xsnr2_right,linestyle='--',color='C2')
            ax1.axvline(xsnr3_left,linestyle='--',color='C2')
            ax1.axvline(xsnr3_right,linestyle='--',color='C2')
            ax2.axvline(xsnr1_left,linestyle='--',color='C2')
            ax2.axvline(xsnr1_right,linestyle='--',color='C2')
            ax2.axvline(xsnr2_left,linestyle='--',color='C2')
            ax2.axvline(xsnr2_right,linestyle='--',color='C2')
            ax2.axvline(xsnr3_left,linestyle='--',color='C2')
            ax2.axvline(xsnr3_right,linestyle='--',color='C2')
        plt.show()


    return spectrum_table
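Hypothetical usage, with file names following the `SRC_DATE_NUM_...` convention parsed when the output name is built:

spec = divspec('/path/to/data/', 'ABAur_20190117_0001_spec.fits',
               'HD31296_20190117_0002_spec.fits',
               dtau=0.05, dpix=0.5, mode='M1', plot=False)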
Example No. 15
imhdr.set('NAME_3', 'XIM', after='STRT_3')

imhdu.header = imhdr

# - write the COVMAT extension to a new output file to start
imhdu.writeto(foutput, overwrite=True)

###################################
# Do the WTHETA 2PTDATA extensions
###################################
if 'pp' in ctypes:

    print("Doing WTHETA 2PTDATA extension...")

    colnames = ['BIN1', 'BIN2', 'ANGBIN', 'VALUE', 'ANG']
    c1 = pf.Column(name=colnames[0], format='K', array=bin1_wtheta_vec)
    c2 = pf.Column(name=colnames[1], format='K', array=bin2_wtheta_vec)
    c3 = pf.Column(name=colnames[2], format='K', array=angbin_wtheta_vec)
    c4 = pf.Column(name=colnames[3], format='D', array=wtheta_vec)
    c5 = pf.Column(name=colnames[4],
                   format='D',
                   unit='arcmin',
                   array=ang_wtheta_vec)  # make sure it's in arcmin!

    # - build the binary table for WTHETA
    tbhdu = pf.BinTableHDU.from_columns([c1, c2, c3, c4, c5])

    # - update the table's header
    tbhdr = tbhdu.header
    tbhdr.set('2PTDATA', True, before='TTYPE1')
    tbhdr.set(
Example No. 16
    print(rootfilename)

    rootfile = TFile(rootfilename)

    hists_to_get = ['BGRate',
                    'EffectiveArea',
                    'DiffSens' ]            
    E = None
    cols = []

    # get the 1D columns:
    for histname in hists_to_get:
        lo,hi,cen, val = get_hist_1d( rootfile, histname )

        if len(cols) == 0:
            cols.append(fits.Column( name="LOG10_E_LO", format="D",array=lo ))
            cols.append(fits.Column( name="LOG10_E_HI", format="D",array=hi ))

        cols.append( fits.Column( name=histname, format="D", array=val ))

    # #insert the migration matrix:
    etlo,ethi,et,erlo,erhi,er,mat = get_hist_2d( rootfile, "MigMatrix" )

    # cols.append( fits.Column( name="E_migration",
    #                             format="{0}D".format(len(mat[0])),
    #                             array=mat))

    tbhdu = fits.BinTableHDU.from_columns( cols )  # fits.new_table was removed from astropy
    tbhdu.name="CTASENS"

Example No. 17
def initialize_psrfits(outfile,
                       y,
                       npsub=-1,
                       nstart=None,
                       nsamp=None,
                       chan_freqs=None):
    """
    Set up a PSRFITS file with everything set up EXCEPT
    the DATA.

    Args:

        outfile: path to the output fits file to write to

        y: your object with the input Filterbank file

        npsub: number of spectra in a subint

        nstart: start sample to read from (for the input file)

        nsamp: number of spectra to read

        chan_freqs: array with frequencies of all the channels

    """

    # Obs Specific Metadata
    # Time Info
    nbits = y.your_header.nbits
    mjd = y.your_header.tstart
    tsamp = y.your_header.tsamp  # seconds

    if nsamp:
        nsamps = nsamp
    else:
        nsamps = y.your_header.nspectra

    if nstart:
        mjd += nstart * tsamp / (24 * 60 * 60)
        if nstart + nsamps > y.your_header.nspectra:
            logging.warning(
                'Data requested exceeds the length of file. Reading data till end of file.'
            )
            nsamps = y.your_header.nspectra - nstart

    # Frequency Info (All freqs in MHz)
    if chan_freqs is None:  # .all() on the None default would raise
        chan_freqs = y.chan_freqs
    nchans = len(chan_freqs)
    fch1 = chan_freqs[0]
    foff = y.your_header.foff

    freqs = fch1 + np.arange(nchans) * foff
    fcenter = fch1 + nchans * foff / 2

    nifs = y.your_header.npol
    # Source Info
    src_name = y.your_header.source_name

    from astropy.coordinates import SkyCoord

    if y.your_header.ra_deg and y.your_header.dec_deg:
        ra = y.your_header.ra_deg
        dec = y.your_header.dec_deg
    else:
        ra = 0
        dec = 0

    loc = SkyCoord(ra, dec, unit='deg')
    ra_hms = loc.ra.hms
    dec_dms = loc.dec.dms

    ra_str = f'{int(ra_hms[0]):02d}:{np.abs(int(ra_hms[1])):02d}:{np.abs(ra_hms[2]):07.4f}'
    dec_str = f'{int(dec_dms[0]):02d}:{np.abs(int(dec_dms[1])):02d}:{np.abs(dec_dms[2]):07.4f}'

    # Beam Info
    beam_info = np.array([0.0, 0.0, 0.0])
    bmaj_deg = beam_info[0] / 3600.0
    bmin_deg = beam_info[1] / 3600.0
    bpa_deg = beam_info[2]

    # Fill in the ObsInfo class
    d = ObsInfo()
    d.fill_from_mjd(mjd)
    d.fill_freq_info(fcenter, nchans, foff)
    d.fill_source_info(src_name, ra_str, dec_str)
    d.fill_beam_info(bmaj_deg, bmin_deg, bpa_deg)
    d.fill_data_info(tsamp, nbits)
    d.calc_start_lst(mjd)

    logging.info('ObsInfo updated with relevant parameters')

    # Determine subint size for PSRFITS table
    if npsub > 0:
        n_per_subint = npsub
    else:
        n_per_subint = int(1.0 / tsamp)

    n_subints = int(nsamps / n_per_subint)
    if nsamps % n_per_subint:
        n_subints += 1

    tstart = 0.0
    t_subint = n_per_subint * tsamp
    d.nsblk = n_per_subint
    d.scan_len = t_subint * n_subints

    tsubint = np.ones(n_subints, dtype=np.float64) * t_subint
    offs_sub = (np.arange(n_subints) + 0.5) * t_subint + tstart

    logging.info(
        f'Setting the following info to be written in {outfile} \n {json.dumps(vars(d), indent=4, sort_keys=True)}'
    )

    # Fill in the headers
    phdr = d.fill_primary_header()
    thdr = d.fill_table_header()
    fits_data = fits.HDUList()
    data = np.array([], dtype=y.your_header.dtype)

    # Prepare arrays for columns
    lst_sub = np.array(
        [d.calc_lst(mjd + tsub / (24. * 3600.0), d.longitude) for tsub in offs_sub],
        dtype=np.float64)
    ra_deg, dec_deg = y.your_header.ra_deg, y.your_header.dec_deg
    l_deg, b_deg = y.your_header.gl, y.your_header.gb
    ra_sub = np.ones(n_subints, dtype=np.float64) * ra_deg
    dec_sub = np.ones(n_subints, dtype=np.float64) * dec_deg
    glon_sub = np.ones(n_subints, dtype=np.float64) * l_deg
    glat_sub = np.ones(n_subints, dtype=np.float64) * b_deg
    fd_ang = np.zeros(n_subints, dtype=np.float32)
    pos_ang = np.zeros(n_subints, dtype=np.float32)
    par_ang = np.zeros(n_subints, dtype=np.float32)
    tel_az = np.zeros(n_subints, dtype=np.float32)
    tel_zen = np.zeros(n_subints, dtype=np.float32)
    dat_freq = np.vstack([freqs] * n_subints).astype(np.float32)

    dat_wts = np.ones((n_subints, nchans), dtype=y.your_header.dtype)
    dat_offs = np.zeros((n_subints, nchans), dtype=y.your_header.dtype)
    dat_scl = np.ones((n_subints, nchans), dtype=y.your_header.dtype)

    # https://het.as.utexas.edu/HET/Software/Astropy-1.0/_modules/astropy/io/fits/column.html
    # mapping from TFORM data type to numpy data type (code)
    # L: Logical (Boolean)
    # B: Unsigned Byte
    # I: 16-bit Integer
    # J: 32-bit Integer
    # K: 64-bit Integer
    # E: Single-precision Floating Point
    # D: Double-precision Floating Point
    # C: Single-precision Complex
    # M: Double-precision Complex
    # A: Character

    dtype = y.your_header.dtype
    if dtype == np.uint8:
        data_format = 'B'
    elif dtype == np.uint16:
        data_format = 'I'
    elif dtype == np.uint32:
        data_format = 'J'
    elif dtype == np.uint64:
        data_format = 'K'
    elif dtype == np.float32:
        data_format = 'E'
    elif dtype == np.float64:
        data_format = 'D'
    else:
        data_format = 'E'

    # Make the columns
    tbl_columns = [
        fits.Column(name="TSUBINT", format='1D', unit='s', array=tsubint),
        fits.Column(name="OFFS_SUB", format='1D', unit='s', array=offs_sub),
        fits.Column(name="LST_SUB", format='1D', unit='s', array=lst_sub),
        fits.Column(name="RA_SUB", format='1D', unit='deg', array=ra_sub),
        fits.Column(name="DEC_SUB", format='1D', unit='deg', array=dec_sub),
        fits.Column(name="GLON_SUB", format='1D', unit='deg', array=glon_sub),
        fits.Column(name="GLAT_SUB", format='1D', unit='deg', array=glat_sub),
        fits.Column(name="FD_ANG", format='1E', unit='deg', array=fd_ang),
        fits.Column(name="POS_ANG", format='1E', unit='deg', array=pos_ang),
        fits.Column(name="PAR_ANG", format='1E', unit='deg', array=par_ang),
        fits.Column(name="TEL_AZ", format='1E', unit='deg', array=tel_az),
        fits.Column(name="TEL_ZEN", format='1E', unit='deg', array=tel_zen),
        fits.Column(name="DAT_FREQ",
                    format=f'{nchans}E',
                    unit='MHz',
                    array=dat_freq),
        fits.Column(name="DAT_WTS", format=f'{nchans}E', array=dat_wts),
        fits.Column(name="DAT_OFFS", format=f'{nchans}E', array=dat_offs),
        fits.Column(name="DAT_SCL", format=f'{nchans}E', array=dat_scl),
        fits.Column(name="DATA",
                    format=str(nifs * nchans * n_per_subint) + data_format,
                    dim=f'({nchans}, {nifs}, {n_per_subint})',
                    array=data),
    ]

    # Add the columns to the table
    logging.info("Building the PSRFITS table")
    table_hdu = fits.BinTableHDU(fits.FITS_rec.from_columns(tbl_columns),
                                 name="subint",
                                 header=thdr)

    # Add primary header
    primary_hdu = fits.PrimaryHDU(header=phdr)

    # Add hdus to FITS file and write
    logging.info(f'Writing PSRFITS table to file: {outfile}')
    fits_data.append(primary_hdu)
    fits_data.append(table_hdu)
    fits_data.writeto(outfile, overwrite=True)
    logging.info(f'Header information written in {outfile}')
    return
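
A hypothetical usage sketch, assuming the input object comes from the `your`
package (whose Your reader exposes the your_header attributes used above);
the filenames are illustrative only.

import your

y = your.Your('observation.fil')  # Filterbank input (hypothetical file)
initialize_psrfits('observation.fits', y, npsub=-1, nstart=0,
                   nsamp=y.your_header.nspectra)
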
Ejemplo n.º 18
0
    def as_binary_table(self, record_name=None):

        # Should this be lazy loaded?
        import astropy.io.fits as pf

        if record_name is None:
            record_name = self.record_name

        # Get the maximum number of features identified in any
        # record.  Use this as the length of the array in the
        # wavelength_coord and fit_wavelength fields
        nfeat = max(
            [len(record.x) for record in self.identify_database.records])

        # The number of coefficients should be the same for all
        # records, so take the value from the first record
        ncoeff = self.identify_database.records[0].nterms

        # Get the number of rows from the number of identify records
        nrows = self.identify_database.numrecords

        # Create pyfits Columns for the table
        column_formats = [
            {
                "name": "spatial_coord",
                "format": "I"
            },
            {
                "name": "spectral_coord",
                "format": "%dE" % nfeat
            },
            {
                "name": "fit_wavelength",
                "format": "%dE" % nfeat
            },
            {
                "name": "ref_wavelength",
                "format": "%dE" % nfeat
            },
            {
                "name": "fit_coefficients",
                "format": "%dE" % ncoeff
            },
        ]
        columns = [pf.Column(**fmt) for fmt in column_formats]

        # Make the empty table.  Use the number of records in the
        # database as the number of rows
        table = pf.BinTableHDU.from_columns(columns, nrows=nrows)  # pf.new_table() was removed from astropy

        # Populate the table from the records
        for i in range(nrows):
            record = self.identify_database.records[i]
            row = table.data[i]
            row["spatial_coord"] = record.y
            row["fit_coefficients"] = record.coeff
            if len(row["spectral_coord"]) != len(record.x):
                row["spectral_coord"][:len(record.x)] = record.x
                row["spectral_coord"][len(record.x):] = -999
            else:
                row["spectral_coord"] = record.x
            if len(row["fit_wavelength"]) != len(record.z):
                row["fit_wavelength"][:len(record.z)] = record.z
                row["fit_wavelength"][len(record.z):] = -999
            else:
                row["fit_wavelength"] = record.z
            if len(row["ref_wavelength"]) != len(record.zref):
                row["ref_wavelength"][:len(record.zref)] = record.zref
                row["ref_wavelength"][len(record.zref):] = -999
            else:
                row["ref_wavelength"] = record.zref

        # Store the record name in the header
        # (Header.update no longer accepts a (key, value) pair; use item assignment)
        table.header["RECORDNM"] = record_name

        # Store other important values from the identify records in the header
        # These should be the same for all records, so take values
        # from the first record
        first_record = self.identify_database.records[0]
        table.header["IDUNITS"] = first_record.fields["units"]
        table.header["IDFUNCTN"] = first_record.modelname
        table.header["IDORDER"] = first_record.nterms
        table.header["IDSAMPLE"] = first_record.fields["sample"]
        table.header["IDNAVER"] = first_record.fields["naverage"]
        table.header["IDNITER"] = first_record.fields["niterate"]
        table.header["IDREJECT"] = "%s %s" % (first_record.fields["low_reject"],
                                              first_record.fields["high_reject"])
        table.header["IDGROW"] = first_record.fields["grow"]
        table.header["IDRANGE"] = "%s %s" % (first_record.mrange[0],
                                             first_record.mrange[1])

        # Store fitcoords information in the header
        fc_record = self.fitcoords_database
        table.header["FCUNITS"] = fc_record.fields["units"]
        table.header["FCAXIS"] = fc_record.fields["axis"]
        table.header["FCFUNCTN"] = fc_record.modelname
        table.header["FCXORDER"] = fc_record.xorder
        table.header["FCYORDER"] = fc_record.yorder
        table.header["FCXRANGE"] = "%s %s" % (fc_record.xbounds[0], fc_record.xbounds[1])
        table.header["FCYRANGE"] = "%s %s" % (fc_record.ybounds[0], fc_record.ybounds[1])
        for i, coeff in enumerate(fc_record.coeff):
            table.header["FCCOEF%d" % i] = coeff

        return table
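
A hypothetical usage sketch: the returned binary table HDU can be appended to a
FITS file in the usual way (the `wavecal` instance and output name are
illustrative only).

tbl = wavecal.as_binary_table(record_name='arc_001')
pf.HDUList([pf.PrimaryHDU(), tbl]).writeto('wavecal_id.fits', overwrite=True)
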
Ejemplo n.º 19
0
def make_ETC_simulations_single(ETC_simulation_prefix,
                                wl,
                                sed,
                                redshift,
                                recompute,
                                FWAs,
                                GWAs,
                                nbexps,
                                sersic=None,
                                effective_radius=None,
                                seed=None):

    # SED (units are those output from Beagle, i.e. erg s^-1 cm^-2 A^-1)
    # Redshift of the i-th object (i.e., i-th row in the input FITS catalogue)

    # Name of the FITS file containing the input SED for the ETC simulator
    ETC_input_file = os.path.join(
        ETC_input_dir, ETC_simulation_prefix + '_input_for_ETC.fits')

    # By default you always recompute the input file for the ETC, but on
    # some occasions you may just want to create the input file for some
    # missing objects
    if not os.path.isfile(ETC_input_file) or recompute:

        # Function that creates the FITS file that will later be used as
        # input for the ETC simulator. Note that this function simply
        # converts the flux to the observed frame, and from F_lambda into
        # F_nu (in Jansky)
        write_ETC_input_file(wl, sed, redshift, ETC_input_file)

    # Cycle across each combination of filter, grating, and number of exposures
    for FWA, GWA, nbexp in zip(FWAs, GWAs, nbexps):  # use the function arguments, not a global 'args'

        # Name of the file created by the ETC simulator (need the name to
        # check if the file already exists or not)
        ETC_output_file = ETC_simulation_prefix + "_snr_PS_" + FWA + "_" + GWA + ".fits"
        ETC_output_file = os.path.join(ETC_output_dir, ETC_output_file)
        if not os.path.isfile(ETC_output_file) or recompute:

            # Run the actual scripts that compute the ETC-like simulated NIRSpec observation
            compute_ETC_simulation(ETC_input_file, FWA, GWA, nbexp,
                                   ETC_output_dir, ETC_simulation_prefix,
                                   sersic, effective_radius, seed)

            # The SED output from the ETC simulator is in units of Jansky
            # (F_nu), while Beagle works in F_lambda. We therefore add two
            # columns to the ETC output containing a Beagle-friendly
            # format.

            # Open the file containing the ETC-like simulation
            hduETC = fits.open(ETC_output_file)

            # Get existing columns
            existing_cols = hduETC[1].columns

            # Add new columns
            new_col = list()

            # Add "FLUX_FLAMBDA" column, expressing the flux in F_lambda, erg s^-1 cm^-2 A^-1
            # The units of the NRSPEC column are Jy
            flux = hduETC[1].data['NRSPEC'] * 1.E-23 * c_light / (
                hduETC[1].data['WAVELENGTH'] * 1.E+10)**2
            new_col.append(
                fits.Column(name='FLUX_FLAMBDA',
                            array=flux,
                            format='E',
                            unit='erg s^-1 cm^-2 A^-1'))

            # Add "NOISE_FLAMBDA" column, expressing the flux in F_lambda, erg s^-1 cm^-2 A^-1
            # The units of the NOISE column are Jy
            noise = hduETC[1].data['NOISE'] * 1.E-23 * c_light / (
                hduETC[1].data['WAVELENGTH'] * 1.E+10)**2
            new_col.append(
                fits.Column(name='NOISE_FLAMBDA',
                            array=noise,
                            format='E',
                            unit='erg s^-1 cm^-2 A^-1'))

            new_col_defs = fits.ColDefs(new_col)

            hduETC[1] = fits.BinTableHDU.from_columns(existing_cols +
                                                      new_col_defs)

            # Add redshift keyword
            hduETC[1].header['redshift'] = float(redshift)

            # Overwrite the FITS file
            hduETC.writeto(ETC_output_file, overwrite=True)

            hduETC.close()
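
The Jansky-to-F_lambda conversion used twice above can be factored into a small
helper; a sketch, assuming c_light is expressed in Angstrom/s (the code
multiplies WAVELENGTH by 1e10, i.e. converts metres to Angstrom):

import numpy as np

def fnu_jy_to_flambda(flux_jy, wavelength_angstrom, c_light=2.998e18):
    """Convert F_nu in Jy to F_lambda in erg s^-1 cm^-2 A^-1."""
    return flux_jy * 1.E-23 * c_light / wavelength_angstrom**2
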
Ejemplo n.º 20
0
for catalog_entry in orig_table:
    krF = join(kroupaFolder,
               'spFly-vipers-' + catalog_entry['id_IAU'][7:] + "-kr.fits")
    ssF = join(salpeterFolder,
               'spFly-vipers-' + catalog_entry['id_IAU'][7:] + "-ss.fits")
    if os.path.isfile(krF) and os.path.isfile(ssF):
        table_entry_kr = get_table_entry_full(hduSPM=fits.open(krF)[1])
        table_entry_ss = get_table_entry_full(hduSPM=fits.open(ssF)[1])
        table_entry = n.hstack((table_entry_kr, table_entry_ss))
        table_all.append(table_entry)
    else:
        table_all.append(n.ones(22) * dV)

headers = ("age_lightW_mean_kroupa age_lightW_err_plus_kroupa age_lightW_err_minus_kroupa "
           "metallicity_lightW_mean_kroupa metallicity_lightW_mean_err_plus_kroupa "
           "metallicity_lightW_mean_err_minus_kroupa stellar_mass_kroupa "
           "stellar_mass_err_plus_kroupa stellar_mass_err_minus_kroupa "
           "spm_EBV_kroupa nComponentsSSP_kroupa "
           "age_lightW_mean_salpeter age_lightW_err_plus_salpeter age_lightW_err_minus_salpeter "
           "metallicity_lightW_mean_salpeter metallicity_lightW_mean_err_plus_salpeter "
           "metallicity_lightW_mean_err_minus_salpeter stellar_mass_salpeter "
           "stellar_mass_err_plus_salpeter stellar_mass_err_minus_salpeter "
           "spm_EBV_salpeter nComponentsSSP_salpeter")

newDat = n.transpose(table_all)

all_cols = []
for data_array, head in zip(newDat, headers.split()):
    all_cols.append(fits.Column(name=head, format='D', array=data_array))

new_cols = fits.ColDefs(all_cols)
hdu = fits.BinTableHDU.from_columns(orig_cols + new_cols)
if os.path.isfile(plate_catalog):
    os.remove(plate_catalog)

hdu.writeto(plate_catalog)
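
The pattern above, concatenating the original column definitions with new ones,
is the standard way to extend a FITS table; a minimal self-contained sketch
with hypothetical file and column names:

import numpy as np
from astropy.io import fits

with fits.open('catalog.fits') as hdul:            # hypothetical input
    orig = hdul[1].columns
    extra = fits.ColDefs([fits.Column(name='NEW_COL', format='D',
                                      array=np.zeros(len(hdul[1].data)))])
    fits.BinTableHDU.from_columns(orig + extra).writeto('catalog_ext.fits',
                                                        overwrite=True)
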
Ejemplo n.º 21
0
    def write(self, out_name=None):
        '''Write current CCI object to fits file.

        Output files are used in later analysis to determine when
        regions fall below the threshold.
        '''

        out_name = out_name or self.cci_name + '_gainmap.fits'

        if os.path.exists(out_name):
            print("not clobbering existing file")
            return

        #-------Ext=0
        hdu_out = fits.HDUList(fits.PrimaryHDU())

        hdu_out[0].header['TELESCOP'] = 'HST'
        hdu_out[0].header['INSTRUME'] = 'COS'
        hdu_out[0].header['DETECTOR'] = 'FUV'
        hdu_out[0].header['OPT_ELEM'] = 'ANY'
        hdu_out[0].header['FILETYPE'] = 'GAINMAP'

        hdu_out[0].header['XBINNING'] = self.xbinning
        hdu_out[0].header['YBINNING'] = self.ybinning
        hdu_out[0].header['SRC_FILE'] = self.cci_name
        hdu_out[0].header['SEGMENT'] = self.segment
        hdu_out[0].header['EXPSTART'] = self.expstart
        hdu_out[0].header['EXPEND'] = self.expend
        hdu_out[0].header['EXPTIME'] = self.exptime
        hdu_out[0].header['NUMFILES'] = self.numfiles
        hdu_out[0].header['COUNTS'] = self.counts
        hdu_out[0].header['DETHV'] = self.dethv
        hdu_out[0].header['cnt00_00'] = self.cnt00_00
        hdu_out[0].header['cnt01_01'] = self.cnt01_01
        hdu_out[0].header['cnt02_30'] = self.cnt02_30
        hdu_out[0].header['cnt31_31'] = self.cnt31_31

        #-------EXT=1
        included_files = np.array(self.file_list)
        files_col = fits.Column('files', '24A', 'rootname', array=included_files)
        tab = fits.BinTableHDU.from_columns([files_col])

        hdu_out.append(tab)
        hdu_out[1].header['EXTNAME'] = 'FILES'

        #-------EXT=2
        hdu_out.append(fits.ImageHDU(data=self.gain_image))
        hdu_out[2].header['EXTNAME'] = 'MOD_GAIN'

        #-------EXT=3
        hdu_out.append(fits.ImageHDU(data=self.counts_image))
        hdu_out[3].header['EXTNAME'] = 'COUNTS'

        #-------EXT=4
        hdu_out.append(fits.ImageHDU(data=self.extracted_charge))
        hdu_out[4].header['EXTNAME'] = 'CHARGE'

        #-------EXT=5
        hdu_out.append(fits.ImageHDU(data=self.big_array[0]))
        hdu_out[5].header['EXTNAME'] = 'cnt00_00'

        #-------EXT=6
        hdu_out.append(fits.ImageHDU(data=self.big_array[1]))
        hdu_out[6].header['EXTNAME'] = 'cnt01_01'

        #-------EXT=7
        hdu_out.append(fits.ImageHDU(data=np.sum(self.big_array[2:31],axis=0)))
        hdu_out[7].header['EXTNAME'] = 'cnt02_30'

        #-------EXT=8
        hdu_out.append(fits.ImageHDU(data=self.big_array[31]))
        hdu_out[8].header['EXTNAME'] = 'cnt31_31'


        #-------Write to file
        hdu_out.writeto(out_name)
        hdu_out.close()
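
A read-back sketch (hypothetical filename): because every extension above is
given an EXTNAME, the gain map and the file list can be retrieved by name
rather than by index.

with fits.open('some_cci_gainmap.fits') as hdul:
    gain = hdul['MOD_GAIN'].data
    rootnames = hdul['FILES'].data['files']
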
Ejemplo n.º 22
0
    def run(self):
        """ Runs the calibrating algorithm. The calibrated data is
            returned in self.dataout
        """
        ### Preparation
        binning = self.datain.getheadval('XBIN')
        ### Run Source Extractor
        # Make sure input data exists as file
        if not os.path.exists(self.datain.filename):
            self.datain.save()
        # Make catalog filename
        catfilename = self.datain.filenamebegin
        if catfilename[-1] in '._-': catfilename += 'sex_cat.fits'
        else: catfilename += '.sex_cat.fits'
        # Make background filename (may not be used - see below)
        bkgdfilename = self.datain.filenamebegin
        if bkgdfilename[-1] in '._-': bkgdfilename += 'SxBkgd.fits'
        else: bkgdfilename += '_SxBkgd.fits'
        self.log.debug('SExtractor catalog filename = %s' % catfilename)
        # Make command string
        command = self.getarg('sx_cmd') % (self.datain.filename)
        command += ' ' + self.getarg('sx_options')
        command += ' -c ' + os.path.expandvars(self.getarg('sx_confilename'))
        command += ' -CATALOG_NAME ' + catfilename
        command += ' -PARAMETERS_NAME ' + os.path.expandvars(
            self.getarg('sx_paramfilename'))
        command += ' -FILTER_NAME ' + os.path.expandvars(
            self.getarg('sx_filterfilename'))
        # Still make the background image so it can be subtracted below
        command += ' -CHECKIMAGE_TYPE BACKGROUND'
        command += ' -CHECKIMAGE_NAME ' + bkgdfilename
        # Call process
        self.log.debug('running command = %s' % command)
        process = subprocess.Popen(command,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        output, error = process.communicate()
        if self.getarg('verbose'):
            self.log.debug(output)
        #subprocess.check_call(command)
        ### Extract catalog from source extractor and clean up dataset
        # Use the catalog from source extractor (test.cat)
        seo_catalog = astropy.table.Table.read(catfilename,
                                               format="fits",
                                               hdu='LDAC_OBJECTS')

        seo_Flux = seo_catalog['FLUX_AUTO']
        seo_Fluxerr = seo_catalog['FLUXERR_AUTO']
        seo_Mag = -2.5 * np.log10(seo_catalog['FLUX_AUTO'])
        seo_MagErr = (2.5 / np.log(10) * seo_catalog['FLUXERR_AUTO'] /
                      seo_catalog['FLUX_AUTO'])

        # Select only the stars in the image: nearly circular sources with S/N > 10
        elongation = (seo_catalog['FLUX_APER'] -
                      seo_catalog['FLUX_AUTO']) < 250
        seo_SN = ((seo_catalog['FLUX_AUTO'] / seo_catalog['FLUXERR_AUTO']) >
                  10)
        seo_SN = (seo_SN) & (elongation) & (
            (seo_catalog['FLUX_AUTO'] / seo_catalog['FLUXERR_AUTO']) < 1000)
        self.log.debug('Selected %d stars from Source Extractor catalog' %
                       np.count_nonzero(seo_SN))
        # Delete the source extractor catalog if requested
        if self.getarg('delete_cat'):
            os.remove(catfilename)

        ### Make table with all data from source extractor
        # Collect data columns
        cols = []
        num = np.arange(1, len(seo_catalog['X_IMAGE'][seo_SN]) + 1)
        cols.append(fits.Column(name='ID', format='D', array=num))
        cols.append(
            fits.Column(name='X',
                        format='D',
                        array=seo_catalog['X_IMAGE'][seo_SN],
                        unit='pixel'))
        cols.append(
            fits.Column(name='Y',
                        format='D',
                        array=seo_catalog['Y_IMAGE'][seo_SN],
                        unit='pixel'))
        cols.append(
            fits.Column(name='Uncalibrated Flux',
                        format='D',
                        array=seo_Flux[seo_SN],
                        unit='flux'))
        cols.append(
            fits.Column(name='Uncalibrated Fluxerr',
                        format='D',
                        array=seo_Fluxerr[seo_SN],
                        unit='flux'))
        # Make table
        c = fits.ColDefs(cols)
        sources_table = fits.BinTableHDU.from_columns(c)

        ### Make output data
        # Use datain as the base for dataout (a reference, not a copy)
        self.dataout = self.datain

        # Add sources and fitdata table
        self.dataout.tableset(sources_table.data, 'Sources',
                              sources_table.header)

        # Remove background file if it's not needed
        if not self.getarg('savebackground'):
            os.remove(bkgdfilename)

        ### If requested make a text file with the sources list
        if self.getarg('sourcetable'):

            # Save region file

            filename = self.dataout.filenamebegin + 'FCALsources.reg'
            with open(filename, 'w+') as f:
                f.write("# Region file format: DS9 version 4.1\n")
                f.write(
                    """global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1 image\n"""
                )
                for i in range(len(seo_catalog['X_IMAGE'][seo_SN])):
                    f.write("circle(%.7f,%.7f,0.005) # text={%i}\n" %
                            (seo_catalog['X_IMAGE'][seo_SN][i],
                             seo_catalog['Y_IMAGE'][seo_SN][i], num[i]))

            # Save the table
            txtname = self.dataout.filenamebegin + 'FCALsources.txt'
            ascii.write(self.dataout.tableget('Sources'),
                        txtname,
                        format=self.getarg('sourcetableformat'))
            self.log.debug('Saved sources table under %s' % txtname)
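
For reference, the seo_MagErr expression above is standard first-order error
propagation: with m = -2.5 log10(F), dm/dF = -2.5 / (F ln 10), so
sigma_m = (2.5 / ln 10) * sigma_F / F. A standalone sketch:

import numpy as np

def mag_and_err(flux, fluxerr):
    """Instrumental magnitude and its propagated 1-sigma error."""
    return -2.5 * np.log10(flux), 2.5 / np.log(10) * fluxerr / flux
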
Ejemplo n.º 23
0
    def voronoi_binning(self, target_snr=10.0, write_fits=False, outfile=None, overwrite=False, plot=False,
                        flag_threshold=0.5, **kwargs):
        """
        Applies Voronoi binning to the data cube, using Cappellari's Python implementation.

        Parameters
        ----------
        target_snr : float
            Desired signal to noise ratio of the binned pixels.
        write_fits : boolean
            Writes a FITS image with the output of the binning.
        outfile : string
            Name of the output FITS file. If 'None' then the name of
            the original FITS file containing the data cube will be used
            as a root name, with '.bin' appended to it.
        overwrite : boolean
            Overwrites files with the same name given in 'outfile'.
        plot : boolean
            Plots the binning results.
        flag_threshold : float
            Bins with less than this fraction of unflagged pixels will be flagged.
        **kwargs : dict
            Arguments passed to voronoi_2d_binning.

        Returns
        -------
        Nothing.

        Notes
        -----
        The output file contains two tables which outline the tessellation process. These are
        stored in the extensions 'VOR' and 'VORPLUS'.
        """

        try:
            from vorbin.voronoi_2d_binning import voronoi_2d_binning
        except ImportError:
            raise ImportError('Could not find the voronoi_2d_binning module. Please add it to your PYTHONPATH.')

        if self.noise is None:
            raise RuntimeError('This function requires prior execution of the snr_eval method.')

        # Initializing the binned arrays as zeros.
        assert hasattr(self, 'data'), 'Could not access the data attribute of the Cube object.'
        b_data = ma.zeros(self.data.shape)
        b_data.mask = self.flags.astype(bool)

        assert hasattr(self, 'variance'), 'Could not access the variance attribute of the Cube object.'
        b_variance = ma.zeros(self.variance.shape)
        b_variance.mask = self.flags.astype(bool)

        assert hasattr(self, 'flags'), 'Could not access the flags attribute of the Cube object.'
        b_flags = np.zeros_like(self.flags, dtype=int)

        valid_spaxels = np.ravel(~np.isnan(self.signal) & ~np.isnan(self.noise) & ~self.spatial_mask)

        x = np.ravel(np.indices(np.shape(self.signal))[1])[valid_spaxels]
        y = np.ravel(np.indices(np.shape(self.signal))[0])[valid_spaxels]

        s, n = deepcopy(self.signal), deepcopy(self.noise)

        s[s <= 0] = np.average(self.signal[self.signal > 0])
        n[n <= 0] = np.average(self.signal[self.signal > 0]) * .5

        signal, noise = np.ravel(s)[valid_spaxels], np.ravel(n)[valid_spaxels]

        bin_num, x_node, y_node, x_bar, y_bar, sn, n_pixels, scale = \
            voronoi_2d_binning(x, y, signal, noise, target_snr, plot=plot, quiet=0, **kwargs)
        v = np.column_stack([y, x, bin_num])

        # For every nan in the original cube, fill with nan the binned cubes.
        nan_idx = (Ellipsis,
                   np.ravel(np.indices(np.shape(self.signal))[0])[~valid_spaxels],
                   np.ravel(np.indices(np.shape(self.signal))[1])[~valid_spaxels])
        b_data[nan_idx] = np.nan
        b_variance[nan_idx] = np.nan
        b_flags[nan_idx] = 1

        for i in np.arange(bin_num.max() + 1):
            same_bin = v[:, 2] == i
            same_bin_coordinates = v[same_bin, :2]

            for k in same_bin_coordinates:
                binned_idx = (Ellipsis, k[0], k[1])
                unbinned_idx = (Ellipsis, same_bin_coordinates[:, 0], same_bin_coordinates[:, 1])

                b_data[binned_idx] = ma.mean(self.data[unbinned_idx], axis=1)
                b_variance[binned_idx] = ma.mean(self.variance[unbinned_idx], axis=1)
                b_flags[binned_idx] = (np.mean(self.flags[unbinned_idx], axis=1) >= flag_threshold).astype(int)

        b_data = b_data.data
        b_variance = b_variance.data

        if write_fits:

            h = fits.HDUList()
            hdu = fits.PrimaryHDU(header=self.header)
            hdu.name = 'PRIMARY'
            hdu.header['VORBIN'] = (True, 'Processed by Voronoi binning?')
            hdu.header['VORTSNR'] = (target_snr, 'Target SNR for Voronoi binning.')
            h.append(hdu)

            hdr = self.header_data
            # noinspection PyTypeChecker
            hdu = fits.ImageHDU(data=b_data, header=hdr)
            hdu.name = 'SCI'
            h.append(hdu)

            # noinspection PyTypeChecker
            hdu = fits.ImageHDU(data=b_variance, header=hdr)
            hdu.name = 'VAR'
            h.append(hdu)

            # noinspection PyTypeChecker
            hdu = fits.ImageHDU(data=b_flags, header=hdr)
            hdu.name = 'FLAGS'
            h.append(hdu)

            tbhdu = fits.BinTableHDU.from_columns(
                [
                    fits.Column(name='xcoords', format='i8', array=x),
                    fits.Column(name='ycoords', format='i8', array=y),
                    fits.Column(name='binNum', format='i8', array=bin_num),
                ], name='VOR')

            tbhdu_plus = fits.BinTableHDU.from_columns(
                [
                    fits.Column(name='ubin', format='i8', array=np.unique(bin_num)),
                    fits.Column(name='xNode', format='F16.8', array=x_node),
                    fits.Column(name='yNode', format='F16.8', array=y_node),
                    fits.Column(name='xBar', format='F16.8', array=x_bar),
                    fits.Column(name='yBar', format='F16.8', array=y_bar),
                    fits.Column(name='sn', format='F16.8', array=sn),
                    fits.Column(name='nPixels', format='i8', array=n_pixels),
                ], name='VORPLUS')

            h.append(tbhdu)
            h.append(tbhdu_plus)

            if outfile is None:
                outfile = self.fitsfile.replace('.fits', '_vor.fits')

            h.writeto(outfile, overwrite=overwrite)

        self.binned_cube = b_data
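
A hypothetical usage sketch (the Cube class and its snr_eval method are assumed
from the surrounding module, as the RuntimeError above implies; the cube
filename is illustrative):

cube = Cube('galaxy_cube.fits')
cube.snr_eval()                     # must run first to populate signal/noise
cube.voronoi_binning(target_snr=15.0, write_fits=True, overwrite=True)
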
Ejemplo n.º 24
0
prihdr['DATE-OBS'] = (start_date_str, 'date of the observation in UTC')
prihdr['END-OBS'] = (end_date_str, 'end time of the observation in UTC')
prihdr['N-HK'] = (len(hk), 'number of housekeeping items')
prihdr.add_comment(
    'each binary table has two columns corresponding to the date and values')
for idx, label in enumerate(hk.keys()):
    prikey = 'HK%02i' % (idx + 1)
    prihdr[prikey] = (label, 'label for FITS binary table %2i' % (idx + 1))
prihdu = pyfits.PrimaryHDU(header=prihdr)

tbhdu = []
for label in hk.keys():

    dimstr = '%i' % len(hk[label]['value'])

    col1 = pyfits.Column(name='DATE',
                         format='D',
                         dim=dimstr,
                         unit='seconds',
                         array=hk[label]['time'])
    col2 = pyfits.Column(name=label,
                         format='D',
                         dim=dimstr,
                         unit=hk[label]['unit'],
                         array=hk[label]['value'])
    cols = pyfits.ColDefs([col1, col2])
    tbhdu.append(pyfits.BinTableHDU.from_columns(cols))

hdulist = pyfits.HDUList([prihdu] + tbhdu)
hdulist.writeto(fitsfile, overwrite=True)
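
A read-back sketch, assuming the pyfits/astropy alias used above: the HKnn
primary-header keywords map each housekeeping label to its binary table, so a
given item can be located by label (filename hypothetical).

with pyfits.open('housekeeping.fits') as hdul:
    label = hdul[0].header['HK01']
    times, values = hdul[1].data['DATE'], hdul[1].data[label]
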
Ejemplo n.º 25
0
def calibration_spectra_fits(num_structures, num_spec_points, spectra):
    """
    Generate calibration spectra FITS structures for a given number of structures and spectral points.

    Parameters
    ----------
    num_structures : int
        Number of structures
    num_spec_points : int
        Number of spectral points
    spectra : list of array-like
        Variable-length calibration spectra, one entry per structure

    Returns
    -------
    astropy.io.fits.HDUList
        HDU list with primary, data ('RATE'), and control ('CONTROL') extensions.
    """
    control_columns = [
        fits.Column(name='DURATION', format='J'),
        fits.Column(name='QUIET_TIME', format='I', array=np.zeros(1)),
        fits.Column(name='LIVE_TIME', format='I', array=np.zeros(1)),
        fits.Column(name='AVERAGE_TEMP', format='I', array=np.zeros(1)),
        fits.Column(name='COMPRESSION_SCHEME_ACCUM_SKM',
                    format='3I',
                    array=np.zeros((1, 3))),
        fits.Column(name='DETECTOR_MASK', format='32B', array=np.zeros(1)),
        fits.Column(name='PIXEL_MASK', format='12B', array=np.zeros(1)),
        fits.Column(name='SUBSPECTRUM_MASK', format='8B', array=np.zeros(1)),
    ]

    for i in range(1, 33):
        control_columns.append(
            fits.Column(name=f'SUBSPEC{i}_NUM_POINTS', format='I'))
        control_columns.append(
            fits.Column(name=f'SUBSPEC{i}_NUM_SUMMED', format='I'))
        control_columns.append(
            fits.Column(name=f'SUBSPEC{i}_LOW_CHAN', format='I'))

    control_coldefs = fits.ColDefs(control_columns)
    control_hdu = fits.BinTableHDU.from_columns(control_coldefs)
    control_hdu.name = 'CONTROL'

    data_columns = (
        fits.Column(name='DETECTOR_ID',
                    format='B',
                    array=np.zeros(num_structures)),
        fits.Column(name='PIXEL_ID',
                    format='B',
                    array=np.zeros(num_structures)),
        fits.Column(name='SUBSPEC_ID',
                    format='B',
                    array=np.zeros(num_structures)),
        fits.Column(name='NUM_POINTS',
                    format='I',
                    array=np.zeros(num_structures)),
        fits.Column(name='COUNTS',
                    format='PJ',
                    array=np.array(spectra, dtype=np.object_)),
    )

    data_coldefs = fits.ColDefs(data_columns)
    data_hdu = fits.BinTableHDU.from_columns(data_coldefs)
    data_hdu.name = 'RATE'

    primary = fits.PrimaryHDU()

    calibration_spectra_hdu_list = fits.HDUList(
        [primary, data_hdu, control_hdu])
    return calibration_spectra_hdu_list
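
A minimal usage sketch: the COUNTS column uses the variable-length 'PJ' format,
so each spectrum may have a different number of points (the values and output
name are illustrative).

import numpy as np

spectra = [np.arange(10, dtype=np.int32), np.arange(7, dtype=np.int32)]
hdul = calibration_spectra_fits(num_structures=2, num_spec_points=10,
                                spectra=spectra)
hdul.writeto('calibration_spectra.fits', overwrite=True)
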
Ejemplo n.º 26
0
ra = radec.ra.degree
dec = radec.dec.degree

hdulist = []
hduprimary = fits.PrimaryHDU()
hduprimary.header.set('EXTNAME', 'PRIMARY')
hduprimary.header.set('FITSTYPE', 'BINTABLE')
hduprimary.header['NSIDE'] = (out_nside, 'NSIDE')
hduprimary.header['PIXAREA'] = (HP.nside2pixarea(out_nside), 'pixel solid angle (steradians)')
hduprimary.header['NEXTEN'] = (len(infiles)+2, 'Number of extensions')
hdulist += [hduprimary]
hdu = fits.HDUList(hdulist)
hdu.writeto(outfile1, overwrite=True)  # 'clobber' was removed in favour of 'overwrite'

pos_cols = []
pos_cols += [fits.Column(name='l', format='D', array=gc.l.degree)]
pos_cols += [fits.Column(name='b', format='D', array=gc.b.degree)]
pos_cols += [fits.Column(name='RA', format='D', array=ra)]
pos_cols += [fits.Column(name='DEC', format='D', array=dec)]
pos_columns = fits.ColDefs(pos_cols, ascii=False)
pos_tbhdu = fits.BinTableHDU.from_columns(pos_columns)  # fits.new_table() was removed from astropy
pos_tbhdu.header.set('EXTNAME', 'COORDINATE')
fits.append(outfile1, pos_tbhdu.data, pos_tbhdu.header, verify=False)

freqcol = [fits.Column(name='Frequency [MHz]', format='D', array=infiles_freq)]
freq_columns = fits.ColDefs(freqcol, ascii=False)
freq_tbhdu = fits.BinTableHDU.from_columns(freq_columns)
freq_tbhdu.header.set('EXTNAME', 'FREQUENCY')
fits.append(outfile1, freq_tbhdu.data, freq_tbhdu.header, verify=False)

hdulist = []
Ejemplo n.º 27
0
 delobs = delem * (1. + z)
 tsigstart = delobs + delal + delrp
 if tsigstart < tobs:
     lib, doc = xml.CreateLib()
     LCfile = 'LC_nu_' + str(i + 1) + '.fits'
     ta = np.empty(500)
     na = np.empty(500)
     for j in range(500):  # xrange is Python 2 only
         ta[j] = j
         if ta[j] < tsigstart:
             na[j] = 0.
         elif tsigstart < ta[j] < tobs:
             na[j] = 1.
         else:
             na[j] = 0.
     time = fits.Column(name='TIME', array=ta, format='1D', unit='s')
     norm = fits.Column(name='NORM', array=na, format='1D')
     t = fits.BinTableHDU.from_columns([time, norm], header=hdr)
     t.writeto(LCfile, overwrite=True)
     tsig = tobs - tsigstart
     ra = uniform(0., 360.)
     dec = declination[i]
     ETeV = np.logspace(-2, 2.5, 45)
     EMeV = ETeV * 1e6
     if z < 0.01:
         atten = 1.
     else:
         atten = np.exp(-1. * tau.opt_depth(z, ETeV))
     prefac = A[i] * 1e-13
     spec = prefac * (ETeV / ep)**(-gam)
     specebl = spec * atten
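
The loop that fills the NORM column above encodes a simple boxcar light curve,
on between tsigstart and tobs; a vectorized equivalent, for reference:

ta = np.arange(500, dtype=float)
na = ((ta > tsigstart) & (ta < tobs)).astype(float)
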
Ejemplo n.º 28
0
target = 'M4'
r = 0.4

#making query
tmc = Vizier.query_region(target, radius=r * u.deg,
                          catalog='II/246/out')  #2MASS catalog
ppmxl = Vizier.query_region(target, radius=r * u.deg,
                            catalog='I/317/sample')  # PPMXL proper motion

#get catalog only
jhk = tmc[0]
pm = ppmxl[0]

# retrieve data only from 2MASS columns [3,4,9,11,13,15,17,19,21]
col1 = fits.Column(name=jhk.colnames[3],
                   format=jhk.dtype[3],
                   array=jhk[jhk.colnames[3]])
col2 = fits.Column(name=jhk.colnames[4],
                   format=jhk.dtype[4],
                   array=jhk[jhk.colnames[4]])
col3 = fits.Column(name=jhk.colnames[9],
                   format=jhk.dtype[9],
                   array=jhk[jhk.colnames[9]])
col4 = fits.Column(name=jhk.colnames[11],
                   format=jhk.dtype[11],
                   array=jhk[jhk.colnames[11]])
col5 = fits.Column(name=jhk.colnames[13],
                   format=jhk.dtype[13],
                   array=jhk[jhk.colnames[13]])
col6 = fits.Column(name=jhk.colnames[15],
                   format=jhk.dtype[15],
Ejemplo n.º 29
0
        tmp_ang = stomp.AngularCoordinate(np.double(obj[args.ra_name]),
                                          np.double(obj[args.dec_name]),
                                          stomp.AngularCoordinate.Equatorial)
        # Test the current catalog object and see if it is contained in the
        # stomp map geometry. Store the result.
        mask[idx] = stomp_map.Contains(tmp_ang)
        if args.n_regions is not None and mask[idx]:
            region_array[idx] = stomp_map.FindRegion(tmp_ang)
    print("\tkept %i / %i" % (data[mask].shape[0], data.shape[0]))

    # Write file to disk and close the currently open fits file.
    col_list = []
    for idx in range(len(data.names)):  # xrange is Python 2 only
        if args.n_regions is not None and \
           data.names[idx] == args.region_column_name:
            continue
        col_list.append(
            fits.Column(name=data.names[idx],
                        format=data.formats[idx],
                        array=data[data.names[idx]][mask]))
    if args.n_regions is not None:
        col_list.append(
            fits.Column(name=args.region_column_name,
                        format='I',
                        array=region_array[mask]))
    out_tbhdu = fits.BinTableHDU.from_columns(col_list)
    out_tbhdu.writeto(args.output_fits_file, overwrite=True)
    hdu.close()
    # Done
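
A compact alternative sketch for the row filtering above: the FITS record array
can be indexed directly with the boolean mask, which preserves all column
definitions without rebuilding them one by one (region-column handling aside).

filtered = fits.BinTableHDU(data=data[mask])
filtered.writeto(args.output_fits_file, overwrite=True)
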
Ejemplo n.º 30
0
def dict_to_hdu(d, name=None, hdr=None, force_to_bintbl=False):
    """
    Write a dictionary to a fits HDU.

    Elements in the dictionary that are integers, floats, or strings
    (specific numpy types or otherwise) are written to the HDU
    header. The header keywords are identical to the dictionary keys.

    If any of the elements in the dictionary are an
    `astropy.table.Table`_, that dictionary can *only* contain that
    table and single values that will be written to the extension
    header. That is, there can be only one `astropy.table.Table`_
    element, and none of the elements can be a :obj:`list` or
    `numpy.ndarray`_. By default the extension name is the dictionary
    key for the `astropy.table.Table`_ item; this can be overridden
    using the ``name`` argument.

    Elements in the dictionary that are a list or a `numpy.ndarray`_
    are written as either an image (if there is only one array and a
    binary table is not specifically requested using
    ``force_to_bintbl``) or a series of table columns. The lists are
    assumed to be interpretable as the ``array`` argument of
    `astropy.io.fits.Column`_ (for a table) or the ``data`` argument
    of `astropy.io.fits.ImageHDU`_ (for an image).

        - If an image is to be written, the extension name, by
          default, is the dictionary key for the array item; this can
          be overridden using the ``name`` argument.

        - If a table is to be written, the method checks that the
          relevant arrays have a consistent number of rows. If they
          do not, the format and dimensions of the table written are
          set so that the arrays are contained in a single row. The
          column names in the table are identical to the dictionary
          keywords. In this case, ``name`` must be provided if you
          want the extension to have a name; there is no default
          name.

    Args:
        d (:obj:`dict`):
            Dictionary with data to write to the
            `astropy.io.fits.BinTableHDU`_.
        name (:obj:`str`, optional):
            Name to give the HDU extension. If None and the input is
            a dictionary with a single array or
            `astropy.table.Table`_ to write, the name of the
            extension is the relevant dictionary keyword. Any
            provided value for ``name`` will override this behavior.
            If the provided dictionary is used to construct a table,
            where the dictionary keys are used for the table column
            names, there is no default name for the extension (i.e.,
            no extension name is used if ``name is None``).
        hdr (`astropy.io.fits.Header`_, optional):
            Base-level header to include in the HDU. If None, an
            empty header is used and then added to.
        force_to_bintbl (:obj:`bool`, optional):
            Force construction of an `astropy.io.fits.BinTableHDU`_ instead of an
            `astropy.io.fits.ImageHDU`_ when either there are no arrays or
            tables to write or only a single array is provided.

    Returns:
        `astropy.io.fits.ImageHDU`_, `astropy.io.fits.BinTableHDU`_:
        HDU with the data. An `astropy.io.fits.ImageHDU`_ object is
        returned if there is 1 (or fewer) array-like objects in the
        dictionary. Otherwise, an `astropy.io.fits.BinTableHDU`_
        object is returned with the data.

    Raises:
        TypeError:
            Raised if the input object is not a dictionary or the
            method cannot interpret how to use an element of the
            dictionary.
        ValueError:
            Raised if dictionary contains another dictionary, more
            than one `astropy.table.Table`_ object, or both an
            `astropy.table.Table`_ and an array-like object
            (:obj:`list` or `numpy.ndarray`_).
    """
    # Check the input is a dictionary (not very pythonic...)
    if not isinstance(d, dict):
        raise TypeError('Input must be a dictionary.')
    # Check the dictionary contents
    ndict = numpy.sum([isinstance(d[key], dict) for key in d.keys()])
    if ndict > 0:
        raise ValueError('Cannot write nested dictionaries.')
    ntab = numpy.sum([isinstance(d[key], Table) for key in d.keys()])
    if ntab > 1:
        raise ValueError(
            'Cannot write dictionaries with more than one astropy.table.Table.'
        )
    narr = numpy.sum(
        [isinstance(d[key], (list, numpy.ndarray)) for key in d.keys()])
    if ntab > 0 and narr > 0:
        raise ValueError(
            'Cannot write dictionaries with both arrays and Tables.')

    # Write any header data and find arrays and Tables
    _hdr = fits.Header() if hdr is None else hdr.copy()
    array_keys = []
    table_keys = []
    for key in d.keys():
        if d[key] is None:
            continue
        # TODO: may be better to do this
        #   isinstance(d[key], (collections.Sequence, numpy.ndarray)):
        # This ignores the defined otype...
        if isinstance(d[key], (list, numpy.ndarray)):
            array_keys += [key]
        elif isinstance(d[key], Table):
            table_keys += [key]
        elif isinstance(d[key],
                        (int, numpy.integer, float, numpy.floating, str)):
            _hdr[key.upper()] = d[key]
        else:
            raise TypeError(
                'Do not know how to write object with type {0}'.format(
                    type(d[key])))

    # If there aren't any arrays or tables, return an empty ImageHDU or
    # BinTableHDU with just the header data.
    if len(array_keys) < 1 and len(table_keys) < 1:
        return fits.BinTableHDU(header=_hdr, name=name) if force_to_bintbl \
                    else fits.ImageHDU(header=_hdr, name=name)

    # If there's only a single array, return it in an ImageHDU or, if
    # requested, a BinTableHDU
    if len(array_keys) == 1 and not force_to_bintbl:
        return fits.ImageHDU(data=d[array_keys[0]],
                             header=_hdr,
                             name=array_keys[0] if name is None else name)

    # If there's only a single Table, return it in a BinTableHDU
    if len(table_keys) == 1:
        # TODO: If we pass hdr directly, does this call include any
        # table meta?
        return fits.BinTableHDU(data=d[table_keys[0]],
                                header=_hdr,
                                name=table_keys[0] if name is None else name)

    # Only remaining option is to build a BinTableHDU based on the
    # dictionary contents.

    # Do all arrays have the same number of rows?
    single_row = len(
        numpy.unique([numpy.asarray(d[key]).shape[0]
                      for key in array_keys])) > 1

    # If the number of rows is inconsistent, save the data in a single
    # row. Otherwise, save the data as a multi-row table.
    cols = []
    for key in array_keys:
        _d = numpy.asarray(d[key])
        # TODO: This barfs if the array to write is a multi-dimensional string
        # array.  This has a direct effect on saving the MultiSlitFlexure object
        # if we want to set 'det' to strings.  There is a hack there that
        # converts between strings and integers for reading and writing the
        # object...
        cols += [
            fits.Column(name=key,
                        format=rec_to_fits_type(_d, single_row=single_row),
                        dim=rec_to_fits_col_dim(_d, single_row=single_row),
                        array=numpy.expand_dims(_d, 0) if single_row else _d)
        ]
    return fits.BinTableHDU.from_columns(cols, header=_hdr, name=name)
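
A minimal usage sketch (assumes the rec_to_fits_type and rec_to_fits_col_dim
helpers from the surrounding module are importable): scalar values land in the
extension header, while the arrays become table columns.

d = {'SURVEY': 'demo', 'WAVE': numpy.arange(5, dtype=float),
     'FLUX': numpy.ones(5)}
hdu = dict_to_hdu(d, name='SPEC')   # BinTableHDU with WAVE and FLUX columns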