Example #1
def fits_writer(data,
                fitsheader,
                colnames,
                desc,
                filename="evtname.fits",
                binary=False):
    '''
    This function takes in several data arrays and lists of strings to produce
    a FITS data table. The table is saved as 'filename'.
    
    Parameters:
    -----------
    data:         list
                  A list of numpy 1D array objects.
    fitsheader:   PyFITS .header object
                  Header to be included in the FITS file.
    colnames:     string list
                  Each entry is one column name.
    desc:         string list
                  Each entry is the description for a column.
    filename:     string
                  The FITS table will be saved to the (optional) filename.
    binary:       boolean
                  Determines the type of table. Choose True for binary, False
                  for ASCII. Default is False.
    
    Returns:
    -------
    None
        
    Notes:
    -----
    All string lists MUST be lists, even if only containing one element.

    'fitsheader' should be fitsfile.header when the file 'fitsfile.fits' is
    read into Python.

    Example:
    -------
    >>> import numpy as np
    >>> import irsa
    >>>
    >>> data     = [ np.arange(10), np.arange(10.) ]
    >>> header   = fitsfile.header
    >>> colnames = ['col1','col2']
    >>> desc     = ['first column', 'second column']
    >>> irsa.fits_writer(data, header, colnames, desc)
    >>>
    
    Revisions:
    ----------
    2011-07-13: [email protected]  initial version
    2012-12-18: [email protected]  changed width calculation method to
                                         stop underestimating the width
    '''

    # Define constants
    allnum = '0123456789'  # integers to be stripped from string
    padding = 3  # number of extra spaces in each column
    #    declen  = 10             # max number of decimal places
    #    intform    = 'I6'          # integer column format
    #    floatform  = 'F18.7'      # float column format
    #    doubleform = 'D18.7'      # double column format
    #    charform   = 'S16'         # character column format

    # Append the FITS extension unless the caller already included it
    # (the default 'evtname.fits' would otherwise become 'evtname.fits.fits').
    if not filename.endswith('.fits'):
        filename = filename + '.fits'

    # Split each value into a whole number and a decimal, then get the length
    # of each. Store the largest whole number and decimal lengths per column.
    # Finally, combine largest whole number and decimal lengths to get a column
    # width. Keep column width and the decimal length for use in FITS
    # formatting code.
    maxlen = np.zeros(len(data), dtype='int64')  # Maximum column length
    maxdec = np.zeros(len(data), dtype='int64')  # Maximum decimal length
    maxpredec = np.zeros(len(data),
                         dtype='int64')  # Largest whole number per column
    maxpostdec = np.zeros(len(data),
                          dtype='int64')  # Largest decimal per column
    for column in range(len(data)):
        for row in range(len(data[column])):
            # Check if value is a float
            # print(data[column][row])
            if str(data[column].dtype).rstrip(allnum) == 'float':
                # Check if float is a nan/inf.
                # Can't split by '.' if it is a nan/inf!
                if np.isnan(data[column][row]):
                    predec = len('nan')
                    postdec = 0
                elif np.isinf(data[column][row]):
                    predec = len('inf')
                    postdec = 0
                else:
                    predec = len(str(data[column][row]).split('.')[0]) + 1
                    postdec = len(str(data[column][row]).split('.')[1])
            else:
                predec = len(str(data[column][row]))
                postdec = 0
            # Store the largest whole number and decimal lengths per column.
            if maxpredec[column] < predec:
                maxpredec[column] = predec
            if maxpostdec[column] < postdec:
                maxpostdec[column] = postdec
        # Add the two lengths together to get a column width.
        maxlen[column] = maxpredec[column] + maxpostdec[column]
        maxdec[column] = maxpostdec[column]  # Keep the decimal width for later

    # Load data into list of column objects
    fitscol = []
    for column in range(len(data)):
        colname = colnames[column]
        coldata = data[column]
        # Get number of decimal places. If zero, do not include in 'length'.
        if maxdec[column] == 0:
            length = str(maxlen[column] + padding)
        else:
            length = str(maxlen[column] + padding) + '.' + str(maxdec[column])
        # Determine the data type of the column
        if str(coldata.dtype).rstrip(allnum) == 'int':
            coltype = 'I' + length
        elif str(coldata.dtype).rstrip(allnum) == 'float' and not binary:
            coltype = 'F' + length
        elif str(coldata.dtype).rstrip(allnum) == 'float' and binary:
            coltype = 'E' + length
        elif str(coldata.dtype).rstrip(allnum) == '|S':
            coltype = 'A' + length
        else:
            coltype = 'F' + length
        col = fits.Column(name=colname, format=coltype, array=coldata)
        fitscol.append(col)

    # Generate a table HDU object
    if not binary:
        tbhdu = fits.TableHDU.from_columns(fitscol)
    else:
        tbhdu = fits.BinTableHDU.from_columns(fitscol)

    # Add comments to the table header
    tbheader = tbhdu.header
    for column in range(len(data)):
        tbnum = column + 1  # the first column is 1, not 0
        ttype = 'TTYPE' + str(tbnum)
        # Update existing 'TTYPE' key with comment from 'desc'
        tbheader[ttype] = (tbheader[ttype], desc[column])

    # Make FITS file using 'fitsheader' and 'tbhdu'
    hdu = fits.PrimaryHDU(None, fitsheader)
    tbhdulist = fits.HDUList([hdu, tbhdu])
    #    try:
    tbhdulist.writeto(filename)
    #    except:
    #        print('Format error occurred while writing file. Attempting to fix.')
    #        tbhdulist[0].verify('fix')
    #        tbhdulist[1].verify('fix')
    #        tbhdulist.writeto(filename)

    return
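
# A quick end-to-end sketch of calling fits_writer (a usage illustration: it
# assumes the function above is importable and that numpy and astropy.io.fits
# are installed; the header built here is not from the original example):
import numpy as np
from astropy.io import fits

header = fits.Header()
header['ORIGIN'] = 'demo'

data     = [np.arange(10), np.arange(10.)]
colnames = ['col1', 'col2']
desc     = ['first column', 'second column']

fits_writer(data, header, colnames, desc, filename='demo', binary=False)
# -> writes 'demo.fits' containing an ASCII table extension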
Example #2
def make_bg(GZ='.gz'):
    """
    Make average background images with object masks
    """
    files = glob.glob('ibhm*flt.seg.fits')
    PATH = '/research/HST/GRISM/3DHST/COSMOS/RAW/'
    PATH = '/3DHST/Spectra/Work/COSMOS/RAW/'
    
    fp = open('COSMOS.g141.list')
    files = fp.readlines()
    fp.close()
    for i in range(len(files)):
        files[i] = files[i][:-1].replace('msk','flt')
    
    files = glob.glob('ib37*flt.seg.fits')
    PATH = '/research/HST/GRISM/3DHST/GOODS-N/RAW/'
    
    #### Direct flat-field
    flat = pyfits.open(IREF+'/uc721143i_pfl.fits')[1].data[5:-5,5:-5]
    flat[flat <= 0] = 5
    flat[flat > 5] = 5
    
    #### Candels
    os.chdir('/Users/gbrammer/CANDELS/Flats/')
    files = np.array(glob.glob('ib*flt.seg.fits'))
    PATH = '/Users/gbrammer/CANDELS/UDS/RAW/'
    
    info = catIO.Readfile(PATH+'../PREP_FLT/files.info')
    
    files = files[info.filter == 'F125W']
    flat = pyfits.open(IREF+'/uc72113qi_pfl.fits')[1].data[5:-5,5:-5]
    
    NF = len(files)
    idx = np.arange(NF)
    X = np.zeros((NF, 1014**2))
        
    ## Otherwise get it from "show_profile" above
    test = idx > -10
    
    for j,i in enumerate(idx):
        if ~test[i]:
            continue
        #
        fi = files[i].replace('.seg','')
        if not os.path.exists(fi.replace('flt','flt.seg')+GZ):
            continue
        #    
        if os.path.exists(fi+'.mask.reg'):
            continue
        #
        print('%d %s' %(i, files[i]))
        flt = pyfits.open(PATH+fi+'.gz')
        flt[1].data *= flat
        ### Segmentation mask
        masked = pyfits.open(fi.replace('flt','flt.seg')+GZ)[0].data == 0
        ### DQ mask, hot pixels and the "death star"
        dq_ok = (flt[3].data & (4+32+16)) == 0
        #
        ok = masked & np.isfinite(flt[1].data) & (dq_ok)
        flt[1].data /= np.median(flt[1].data[ok])
        flt[1].data[(ok == False)] = 0
        X[j,:] = flt[1].data.flatten()

    #### Average
    nsum = np.sum(X != 0, axis=0).reshape(1014,1014)
    avg = np.sum(X, axis=0).reshape(1014,1014)/nsum
     
    ### Fill empty pixels with no input images
    sky = avg
    x,y = np.where((np.isfinite(sky) == False) | (sky/flat > 1.15))
    NX = len(x)
    pad = 1
    for i in range(NX):
        xi = x[i]
        yi = y[i]
        sub = sky[xi-pad:xi+pad+2,yi-pad:yi+pad+2]
        if (np.sum(sub) != 0.0):
            sky[xi,yi] = np.median(sub[np.isfinite(sub)])
    
    still_bad = (np.isfinite(sky) == False) | (sky <= 0.01)
    sky[still_bad] = flat[still_bad]
    
    # bad_flat = (flat < 0.5)
    # sky[bad_flat] = flat[bad_flat]
        
    im_sky = pyfits.PrimaryHDU(data=sky)
    im_n = pyfits.ImageHDU(data=nsum)
    im = pyfits.HDUList([im_sky, im_n])
    im.writeto('sky.fits', clobber=True)
    
    #### for DIRECT flat
    flatim = pyfits.open(IREF+'/uc721143i_pfl.fits')
    flatim[1].data[5:-5,5:-5] = sky
    flatim[3].data[5:-5,5:-5] = nsum
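
# The DQ cut above keeps pixels where none of the flags 4, 16 or 32 are set;
# the same bitwise technique in isolation (synthetic DQ values, a sketch):
import numpy as np

dq = np.array([0, 4, 16, 32, 4 + 32, 2])
BAD = 4 + 16 + 32                  # same combination as (4+32+16) above
dq_ok = (dq & BAD) == 0            # True where no bad flag is set
print(dq_ok)                       # [ True False False False False  True]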
Example #3
                        'TELESCOP' : 'BURSTCUBE',
                        'INSTRUME' : 'BURSTCUBE',
                        'TRIGTIME' : 387128425.854846,
                        'OBJECT' : 'TESTOBJ',
                        'RADECSYS' : 'FK5',
                        'EQUINOX' : 2000.0,
                        'RA_OBJ' : 30.0000,
                        'DEC_OBJ' : -15.000,
                        'ERR_RAD' : 3.000,
                        'TSTART' : 387128290.1728720,
                        'TSTOP' : 387128904.582618,
                        'HDUCLASS' : 'OGIP',
                        'HDUCLAS1' : 'EVENTS'}
    

    primaryheader = fits.Header(primarykeywords)
    
    # Construct the output HDUs
    primary_hdu = fits.PrimaryHDU(header=primaryheader)
    ebounds_hdu = bintablehdu_constructor(channels,('CHANNEL','E_MIN','E_MAX'), ('short','float','float'),'EBOUNDS')
    events_hdu = bintablehdu_constructor(eventpairs,('TIME','PHA'), ('double','short'),'EVENTS')
    gti_hdu = bintablehdu_constructor([[tstart,tstop]],('START','STOP'), ('double','double'), 'GTI')
    hdulist = fits.HDUList([primary_hdu,ebounds_hdu,events_hdu,gti_hdu])

    # Add additional keywords to headers
    
    
    # Write out the tte file
    hdulist.writeto(outfile)
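
# The bintablehdu_constructor() helper used above is not shown; a minimal
# sketch of what such a constructor might look like (the type-name to FITS
# format mapping is an assumption, not the original implementation):
from astropy.io import fits

def bintablehdu_constructor(rows, names, types, extname):
    fmt = {'short': 'I', 'float': 'E', 'double': 'D'}
    cols = [fits.Column(name=n, format=fmt[t], array=list(arr))
            for arr, n, t in zip(zip(*rows), names, types)]
    return fits.BinTableHDU.from_columns(cols, name=extname)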
def main():
    import sys
    import fitsio
    from astropy.io import fits
    import itertools
    import numpy as np
    sys.path.insert(0, '/home/dfa/sobreira/alsina/alpha-beta-gamma/code/src')
    #sys.path.insert(0, '/global/cscratch1/sd/alsina/alpha-beta-gamma/code/src')

    args = parse_args()

    #Make directory where the output data will be
    outpath = os.path.expanduser(args.outpath)
    try:
        if not os.path.exists(outpath):
            os.makedirs(outpath)
    except OSError:
        if not os.path.exists(outpath): raise

    namesout = ['TAU0', 'TAU2', 'TAU5']
    if args.xim:
        args.tausjnfile = '/home/dfa/sobreira/alsina/alpha-beta-gamma/code/correlations/alltausm_jk.fits'
    alldata = fitsio.read(args.tausjnfile, ext=1)

    names = ['BIN1', 'BIN2', 'ANGBIN', 'VALUE', 'ANG']
    forms = ['i4', 'i4', 'i4', 'f8', 'f8']
    dtype = dict(names=names, formats=forms)
    nrows = 20
    outdata = np.recarray((nrows, ), dtype=dtype)

    a = list(range(nrows))
    b = list(range(nrows))
    bin_pairs = list(itertools.product(a, b))

    for nam in namesout:
        corr = []
        covmat = np.zeros(shape=(nrows, nrows))
        for i, j in bin_pairs:
            bin1 = (alldata['ANGBIN'] == i)
            bin2 = (alldata['ANGBIN'] == j)
            a = alldata[nam][bin1]
            b = alldata[nam][bin2]
            covmat[i, j] = np.cov(a, b)[0][1]
            if (i == j):
                corr.append(np.mean(alldata[nam][bin1]))

        hdu = fits.PrimaryHDU()
        hdul = fits.HDUList([hdu])
        covmathdu = fits.ImageHDU(covmat, name='COVMAT')
        hdul.insert(1, covmathdu)

        angarray = alldata['THETA'][alldata['JKR'] == 0]
        valuearray = np.array(corr)
        bin1array = np.array([-999] * nrows)
        bin2array = np.array([-999] * nrows)
        angbinarray = np.arange(nrows)
        array_list = [bin1array, bin2array, angbinarray, valuearray, angarray]
        for array, name in zip(array_list, names):
            outdata[name] = array

        corrhdu = fits.BinTableHDU(outdata, name=nam)
        hdul.insert(2, corrhdu)
        if args.xim:
            hdul.writeto(outpath + nam + 'm.fits', overwrite=True)
        else:
            hdul.writeto(outpath + nam + 'p.fits', overwrite=True)
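
# Aside: the (i, j) loop above fills the covariance matrix one entry at a
# time. When the jackknife samples for every bin line up in a single
# (nbins, nsamples) array, numpy builds the whole matrix in one call
# (synthetic data, a sketch):
import numpy as np

samples = np.random.default_rng(0).normal(size=(20, 100))
covmat = np.cov(samples)           # full 20x20 covariance matrix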
Example #5
import numpy as np
from astropy.io import fits

source = 'DSv'
fnum = 10
detectnum = 5

home = f'/blue/adamginsburg/d.jeff/SgrB2DSreorg/field{fnum}/CH3OH/'
sourcepath = home + f'{source}/200K_field10originals_z0_000190713_5-6mhzwidth_stdfixes/'
print(f'Accessing images in {sourcepath}')
transitionimg = fits.open(
    sourcepath + "texmap_3transmask_3sigma_allspw_withnans_weighted.fits")[0]
teximg = fits.open(sourcepath +
                   "texmap_3sigma_allspw_withnans_weighted.fits")[0]

transitionmap = transitionimg.data
texmap = teximg.data

print('Masking temperature map')
maskedarr = np.ma.masked_where(transitionmap < detectnum, texmap)
maskedmap = maskedarr.filled(fill_value=np.nan)

print('Creating new fits file')
maskedhdu = fits.PrimaryHDU(maskedmap)
maskedhdu.header = teximg.header
maskedfits = fits.HDUList(maskedhdu)
maskedoutpath = sourcepath + f"texmap_{detectnum}transmask_3sigma_allspw_withnans_weighted.fits"
print(f'Saving at {maskedoutpath}')
maskedfits.writeto(maskedoutpath)
print('Done')
def mkmaster(instrument, fn_dict, mtype, fmin=5, master_dir='./'):
    """
    PURPOSE:        
        Make master calibration frames (bias, dark, flat)
        * currently no outlier rejection other than median combine
    INPUT:
        instrument  - instrument name defined in instrument_dict (ex. 'ratir')
        fn_dict     - dictionary output by choose_calib() containing organized 
                      fits file names.  can also provide file name of pickled dictionary.
        mtype       - type of master frame. should be either 'flat', 'dark' or 'bias'
        fmin        - minimum number of files needed to make a master frame
    EXAMPLE:
        mkmaster('ratir', fn_dict=output from choose_calib(), mtype = bias, dark or flat name)
    FUTURE IMPROVEMENTS:
        - Better outlier rejection
    """
    instrum = instrument_dict[instrument]

    # check if input is a file name
    if type(fn_dict) is str:
        if fn_dict.split('.')[-1] == 'p':
            af.print_bold("Loading pickled dictionary from file.")
            fn_dict = pickle.load(open(fn_dict, 'rb'))
        else:
            af.print_err("Invalid pickle file extension detected. Exiting...")
            return

    # check for valid mtype
    if mtype not in [instrum.flatname, instrum.biasname, instrum.darkname]:
        af.print_err(
            "Error: valid arguments for mtype are {}, {} and {}. Exiting...".
            format(instrum.flatname, instrum.biasname, instrum.darkname))
        return

    bands = fn_dict.keys()

    sorttype = 'BAND'

    if mtype in [instrum.biasname, instrum.darkname]:
        sorttype = 'CAMERA'

    d = os.getcwd().split('/')[-1]  # name of current directory
    af.print_head("\nMaking master {} frame in {}:".format(mtype, d))

    # work on FITS files for specified photometric bands
    for band in bands:

        # print current band
        af.print_under("\n{:^50}".format('{} {}'.format(band, sorttype)))

        first_file = fn_dict[band][0]
        ind_C = first_file.index('C')
        cam = first_file[ind_C:ind_C + 2]
        cam_i = int(cam[1])

        # check if required files are present
        if instrum.has_cam_bias(cam_i):
            mbias_fn = '{}_{}.fits'.format(instrum.biasname, cam)
        else:
            mbias_fn = None

        if instrum.has_cam_dark(cam_i):
            mdark_fn = '{}_{}.fits'.format(instrum.darkname, cam)
        else:
            mdark_fn = None

        if mtype is not instrum.biasname:
            if mbias_fn is not None:
                if not os.path.exists(mbias_fn):
                    af.print_err(
                        'Error: {} not found.  Move master bias file to working directory to proceed.'
                        .format(mbias_fn))
                    continue
            if mdark_fn is not None:
                if (mtype is
                        instrum.flatname) and (not os.path.exists(mdark_fn)):
                    af.print_err(
                        'Error: {} not found.  Move master dark file to working directory to proceed.'
                        .format(mdark_fn))
                    continue

        # check dictionary entries
        fns = fn_dict[band]
        if len(fns) < fmin:
            if len(fns) == 0:
                af.print_err(
                    'Error: no frames available to make master {} for {} {}.'.
                    format(mtype, band, sorttype.lower()))
                continue
            else:
                temp = input(
                    af.bcolors.WARNING +
                    "Only {} frames available to make master {} for {} {}.  Continue? (y/n): "
                    .format(len(fns), mtype, band, sorttype.lower()) +
                    af.bcolors.ENDC)
                if temp.lower() != 'y' and temp.lower() != 'yes':
                    af.print_warn("Skipping {}...".format(band))
                    continue

        # load calibration data
        hdu = pf.PrimaryHDU()
        filter_arr = []  # to check that all frames used the same filter
        exptime_arr = []  # to check that all frames have the same exposure time (where relevant)
        data_arr = []
        i = 0
        for fn in fns:
            print(fn)
            hdu.header[FITS_IN_KEY(i)] = fn  # add flat fn to master flat header
            hdulist = pf.open(fn)
            data_arr.append(hdulist[0].data)
            filter_arr.append(hdulist[0].header['FILTER'])
            exptime_arr.append(hdulist[0].header['EXPTIME'])
            i += 1
        data_arr = np.array(data_arr, dtype=float)

        # check that frames match
        for i in range(len(fns) - 1):
            if (filter_arr[i + 1] != filter_arr[0]) and (mtype is
                                                         instrum.flatname):
                af.print_err(
                    "Error: cannot combine flat frames with different filters. Skipping {} {}..."
                    .format(band, sorttype.lower()))
                continue
            if (exptime_arr[i + 1] != exptime_arr[0]) and (mtype is
                                                           instrum.darkname):
                af.print_err(
                    "Error: cannot combine dark frames with different exposure times. Skipping {} {}..."
                    .format(band, sorttype.lower()))
                continue
        if mtype == instrum.flatname:
            hdu.header['FILTER'] = filter_arr[0]  # add filter keyword to master frame
        if mtype == instrum.darkname:
            hdu.header['EXPTIME'] = exptime_arr[0]  # add exposure time keyword to master frame

        # add CAMERA header keyword
        hdu.header['CAMERA'] = cam_i  # add camera keyword to master frame

        # crop bias frames
        if mtype is instrum.biasname:
            data_arr = data_arr[(np.s_[:], instrum.slice(cam)[0],
                                 instrum.slice(cam)[1])]
        # crop dark frames and perform calculations
        elif mtype is instrum.darkname:
            data_arr = data_arr[(np.s_[:], instrum.slice(cam)[0],
                                 instrum.slice(cam)[1])]
            data_arr = (data_arr - pf.getdata(mbias_fn)
                        ) / hdu.header['EXPTIME']  # calculate dark current
        # crop flat frames and perform calculations
        elif mtype is instrum.flatname:
            if mbias_fn is not None:
                mbd = pf.getdata(mbias_fn)
            if mdark_fn is not None:
                mdd = pf.getdata(mdark_fn)

            if instrum.is_cam_split(cam_i):
                pass  # split data is already cropped
            else:
                data_arr = data_arr[(np.s_[:], instrum.slice(cam)[0],
                                     instrum.slice(cam)[1])]

            for i in range(len(exptime_arr)):
                if mbias_fn is not None:
                    data_arr[i] -= mbd
                if mdark_fn is not None:
                    data_arr[i] -= mdd * exptime_arr[i]
                data_arr[i] /= np.median(data_arr[i])

        # make master frame
        master = af.imcombine(data_arr, type='median').astype(float)

        # add master to hdu
        if mtype is instrum.flatname:
            hdu.data = master / np.median(master)  # normalize master flat
        else:
            hdu.data = master

        # save master to fits
        hdulist = pf.HDUList([hdu])
        try:
            hdulist.writeto('{}{}_{}.fits'.format(master_dir, mtype, band),
                            clobber=True)
        except IOError:
            os.mkdir(master_dir)
            hdulist.writeto('{}{}_{}.fits'.format(master_dir, mtype, band),
                            clobber=True)
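
# The combine step relies on the pipeline's own af.imcombine helper; its
# median combine amounts to the following (synthetic frames, a sketch rather
# than the pipeline implementation):
import numpy as np

frames = np.array([[[1., 2.], [3., 4.]],
                   [[1., 2.], [3., 4.]],
                   [[9., 2.], [3., 4.]]])   # one outlier pixel in frame 3
master = np.median(frames, axis=0)          # -> [[1., 2.], [3., 4.]]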
Example #7
    col8 = fits.Column(name='RWAV', format='D', array=np.array(a8))
    col9 = fits.Column(name='ZFLUX',
                       format='PD()',
                       array=np.array(a9, dtype=object))
    col10 = fits.Column(name='ZAND',
                        format='PI()',
                        array=np.array(a10, dtype=object))
    col11 = fits.Column(name='ZINV',
                        format='PD()',
                        array=np.array(a11, dtype=object))
    col12 = fits.Column(name='ZWAV', format='D', array=np.array(a12))
    col13 = fits.Column(name='SPECTYPE', format='I', array=np.array(a13))
    col14 = fits.Column(name='TRUEZ', format='D', array=np.array(a14))
    col15 = fits.Column(name='TARGETID', format='I', array=np.array(a15))
    cols = fits.ColDefs([
        col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11,
        col12, col13, col14, col15
    ])
    tbhdu = fits.BinTableHDU.from_columns(cols)
    prihdr = fits.Header()
    prihdr['FIBER'] = C_Fiber
    prihdr['OBJNO'] = len(a5)
    prihdu = fits.PrimaryHDU(header=prihdr)
    file_dir = desidir + slash + str(C_Fiber) + slash
    os.mkdir(file_dir)
    file_name = file_dir + "New" + str(C_Fiber)
    print(file_name)
    thdulist = fits.HDUList([prihdu, tbhdu])
    thdulist.writeto(file_name)
    i = i + 1
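
# The 'PD()' / 'PI()' formats above declare variable-length array columns,
# which store a different number of elements in each row; a self-contained
# sketch (the file name is hypothetical):
import numpy as np
from astropy.io import fits

ragged = np.array([np.arange(3.), np.arange(5.)], dtype=object)
col = fits.Column(name='ZFLUX', format='PD()', array=ragged)
fits.BinTableHDU.from_columns([col]).writeto('varlen_demo.fits',
                                             overwrite=True)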
Example #8
    def to_fits(self, header=None, energy_unit='TeV', **kwargs):
        """
        Convert RM to FITS HDU list format.

        Parameters
        ----------
        header : `~astropy.io.fits.Header`
            Header to be written in the fits file.
        energy_unit : str
            Unit in which the energy is written in the HDU list

        Returns
        -------
        hdulist : `~astropy.io.fits.HDUList`
            RMF in HDU list format.

        Notes
        -----
        For more info on the RMF FITS file format see:
        http://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/summary/cal_gen_92_002_summary.html

        """
        self._e_reco = self._e_reco.to(energy_unit)
        self._e_true = self._e_true.to(energy_unit)

        # Cannot use table_to_fits here due to variable length array
        # http://docs.astropy.org/en/v1.0.4/io/fits/usage/unfamiliar.html
        table = self.to_table()
        cols = table.columns

        c0 = fits.Column(name=cols[0].name, format='E', array=cols[0],
                         unit='{}'.format(cols[0].unit))
        c1 = fits.Column(name=cols[1].name, format='E', array=cols[1],
                         unit='{}'.format(cols[1].unit))
        c2 = fits.Column(name=cols[2].name, format='I', array=cols[2])
        c3 = fits.Column(name=cols[3].name, format='PI()', array=cols[3])
        c4 = fits.Column(name=cols[4].name, format='PI()', array=cols[4])
        c5 = fits.Column(name=cols[5].name, format='PE()', array=cols[5])

        hdu = fits.BinTableHDU.from_columns([c0, c1, c2, c3, c4, c5])

        if header is None:
            header = hdu.header

            header['EXTNAME'] = 'MATRIX', 'name of this binary table extension'
            header['TELESCOP'] = 'DUMMY', 'Mission/satellite name'
            header['INSTRUME'] = 'DUMMY', 'Instrument/detector'
            header['FILTER'] = '', 'Filter information'
            header['CHANTYPE'] = 'PHA', 'Type of channels (PHA, PI etc)'
            header['HDUCLASS'] = 'OGIP', 'Organisation devising file format'
            header['HDUCLAS1'] = 'RESPONSE', 'File relates to response of instrument'
            header['HDUCLAS2'] = 'RSP_MATRIX', 'Keyword information for Caltools Software.'
            header['HDUVERS '] = '1.3.0', 'Version of file format'

            # Obsolete RMF headers, included for the benefit of old software
            header['RMFVERSN'] = '1992a', 'Obsolete'
            header['HDUVERS1'] = '1.1.0', 'Obsolete'
            header['HDUVERS2'] = '1.2.0', 'Obsolete'

        for key, value in kwargs.items():
            header[key] = value

        header['DETCHANS'] = self._e_reco.nbins, 'Total number of detector PHA channels'
        header['TLMIN4'] = 0, 'First legal channel number'
        # E_reco nbins - 1 
        header['TLMAX4'] = len(self._e_reco) - 2, 'Highest legal channel number'
        numgrp, numelt = 0, 0
        #for val, val2 in zip(hdu.data['N_GRP'], hdu.data['N_CHAN']):
        #    numgrp += np.sum(val)
        #    numelt += np.sum(val2)
        #header['NUMGRP'] = numgrp, 'Total number of channel subsets'
        #header['NUMELT'] = numelt, 'Total number of response elements'
        header['LO_THRES'] = self.pdf_threshold, 'Lower probability density threshold for matrix'
        hdu.header = header

        hdu2 = self._e_reco.to_ebounds()

        prim_hdu = fits.PrimaryHDU()
        return fits.HDUList([prim_hdu, hdu, hdu2])

def make_savefile(anchor_fits, simname, haloname, simdir, DD, ds, ad):

    #a = fits.open(fits_name)
    '''
    mss = a['STAR_MASS'].data
    id_s = a['STARS_ID'].data
    xs, ys, zs = a['STARS_GAL_POSITION'].data
    vxs, vys, vzs = a['STARS_GAL_VELOCITY'].data
    ep_s = a['STARS_EPSILON_FIXED'].data
    xs_box, ys_box, zs_box = a['STARS_BOX_POSITION'].data
    vxs_box, vys_box, vzs_box = a['STARS_BOX_VELOCITY'].data
    '''
    DDname = 'DD%.4i' % DD
    #ds = yt.load('%s/%s/%s/%s/%s'%(simdir, haloname, simname,  DDname, DDname))
    #ad = ds.all_data()
    #def _stars(pfilter, data): return data[(pfilter.filtered_type, "particle_type")] == 2

    #yt.add_particle_filter("stars",function=_stars, filtered_type='all',requires=["particle_type"])
    #ds.add_particle_filter('stars')

    mss = ad['stars', 'particle_mass'].in_units('Msun')
    xs_box = ad['stars', 'particle_position_x'].in_units('kpc')
    ys_box = ad['stars', 'particle_position_y'].in_units('kpc')
    zs_box = ad['stars', 'particle_position_z'].in_units('kpc')

    vxs_box = ad['stars', 'particle_velocity_x'].in_units('km/s')
    vys_box = ad['stars', 'particle_velocity_y'].in_units('km/s')
    vzs_box = ad['stars', 'particle_velocity_z'].in_units('km/s')

    id_s = ad['stars', 'particle_index']

    hdus = []
    prim_hdu = fits.PrimaryHDU()
    hdus.append(prim_hdu)

    for sat_n in arange(shape(anchor_fits)[0]):
        anchor_ids = anchor_fits[sat_n]
        gd_indices = []
        for anch_id in anchor_ids:
            match = where(id_s == anch_id)[0]
            if len(match) > 0: gd_indices.append(int(match))
        gd_indices = array(gd_indices)

        if len(gd_indices) > 10:
            print('more than 10 anchor stars found for sat %i in DD%.4i' %
                  (sat_n, DD))
            anchor_mss = mss[gd_indices]
            anchor_xs_box = xs_box[gd_indices]
            anchor_ys_box = ys_box[gd_indices]
            anchor_zs_box = zs_box[gd_indices]
            anchor_vxs_box = vxs_box[gd_indices]
            anchor_vys_box = vys_box[gd_indices]
            anchor_vzs_box = vzs_box[gd_indices]

            anchor_R = sqrt(anchor_xs_box**2. + anchor_ys_box**2. +
                            anchor_zs_box**2.)

            hist_R, r_edges = np.histogram(anchor_R.value,
                                           weights=anchor_mss.value,
                                           bins=arange(
                                               min(anchor_R.value) - 20,
                                               max(anchor_R.value) + 20, 10))

            Rmid = np.mean(
                [r_edges[argmax(hist_R)], r_edges[argmax(hist_R) + 1]])
            good = where(abs(anchor_R.value - Rmid) < 5)[0]

            anchor_xs_box_avg, _ = weighted_avg_and_std(anchor_xs_box,
                                                        weights=anchor_mss,
                                                        good=good)
            anchor_ys_box_avg, _ = weighted_avg_and_std(anchor_ys_box,
                                                        weights=anchor_mss,
                                                        good=good)
            anchor_zs_box_avg, _ = weighted_avg_and_std(anchor_zs_box,
                                                        weights=anchor_mss,
                                                        good=good)
            anchor_vxs_box_avg, _ = weighted_avg_and_std(anchor_vxs_box,
                                                         weights=anchor_mss,
                                                         good=good)
            anchor_vys_box_avg, _ = weighted_avg_and_std(anchor_vys_box,
                                                         weights=anchor_mss,
                                                         good=good)
            anchor_vzs_box_avg, _ = weighted_avg_and_std(anchor_vzs_box,
                                                         weights=anchor_mss,
                                                         good=good)

            box_avg = [
                anchor_xs_box_avg, anchor_ys_box_avg, anchor_zs_box_avg,
                anchor_vxs_box_avg, anchor_vys_box_avg, anchor_vzs_box_avg
            ]

            cols1 = fits.ColDefs([
                fits.Column(name='box_avg', array=box_avg, format='D'),
                fits.Column(name='anchor_mss     ',
                            array=anchor_mss,
                            format='D'),
                fits.Column(name='anchor_xs_box  ',
                            array=anchor_xs_box,
                            format='D'),
                fits.Column(name='anchor_ys_box  ',
                            array=anchor_ys_box,
                            format='D'),
                fits.Column(name='anchor_zs_box  ',
                            array=anchor_zs_box,
                            format='D'),
                fits.Column(name='anchor_vxs_box ',
                            array=anchor_vxs_box,
                            format='D'),
                fits.Column(name='anchor_vys_box ',
                            array=anchor_vys_box,
                            format='D'),
                fits.Column(name='anchor_vzs_box ',
                            array=anchor_vzs_box,
                            format='D'),
                fits.Column(name='ids_used_avg', array=good, format='I'),
            ])
        else:
            print('less than 10 anchor stars found for sat %i in DD%.4i' %
                  (sat_n, DD))
            cols1 = fits.ColDefs([
                fits.Column(name='box_avg     ', array=None, format='0D'),
                fits.Column(name='anchor_mss     ', array=None, format='0D'),
                fits.Column(name='anchor_xs_box  ', array=None, format='0D'),
                fits.Column(name='anchor_ys_box  ', array=None, format='0D'),
                fits.Column(name='anchor_zs_box  ', array=None, format='0D'),
                fits.Column(name='anchor_vxs_box ', array=None, format='0D'),
                fits.Column(name='anchor_vys_box ', array=None, format='0D'),
                fits.Column(name='anchor_vzs_box ', array=None, format='0D'),
                fits.Column(name='ids_used_avg', array=None, format='0D'),
            ])

        hdus.append(
            fits.BinTableHDU.from_columns(cols1, name='SAT_%.2i' % sat_n))

    hdus_fits = fits.HDUList(hdus)
    hdus_fits.writeto(
        '/nobackupp2/rcsimons/foggie_momentum/anchor_files/%s_DD%.4i_anchorprops.fits'
        % (simname, DD),
        overwrite=True)
Example #10
    def to_hdulist(self):
        hdu = table_to_fits_table(self.to_table())
        prim_hdu = fits.PrimaryHDU()
        return fits.HDUList([prim_hdu, hdu])
Example #11
    hdulist = pyfits.open(inputfile)

    outlist = [pyfits.PrimaryHDU()]

    for i in range(1, len(hdulist)):

        # header.get avoids a NameError when an extension has no EXTNAME
        # (the original bare try/except left extname undefined in that case).
        extname = hdulist[i].header.get('EXTNAME', '')

        delete_this_one = False
        for ext_to_delete in sys.argv[3:]:
            if (extname == ext_to_delete):
                delete_this_one = True
                break

        if (delete_this_one):
            print("Deleting extension %s ..." % (extname))
            continue

        print("Keeping extension %s ..." % (extname))
        outlist.append(hdulist[i])

    print("writing")
    hdu_out = pyfits.HDUList(outlist)

    hdu_out.writeto(outputfile, overwrite=True)
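
# The same EXTNAME filtering idea as a compact, self-contained sketch
# (synthetic HDUs; the extension name dropped here is arbitrary):
import numpy as np
from astropy.io import fits

hdul = fits.HDUList([fits.PrimaryHDU(),
                     fits.ImageHDU(np.zeros((2, 2)), name='SCI'),
                     fits.ImageHDU(np.zeros((2, 2)), name='DQ')])
kept = fits.HDUList([hdul[0]] + [h for h in hdul[1:]
                                 if h.header.get('EXTNAME') != 'DQ'])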
Example #12
                        itertools.repeat(wvs4broadening),
                        itertools.repeat(planet_convspec_broadsampling),
                        itertools.repeat(A0_spec),
                        itertools.repeat(phoenix_A0_func),
                        itertools.repeat(A0_rv), itertools.repeat(A0_baryrv),
                        itertools.repeat(science_baryrv),
                        itertools.repeat(c_kms), itertools.repeat(cutoff),
                        itertools.repeat(rv_list)))
                for vsini_id, out in enumerate(outputs_list):
                    _fluxout, _dAICout, _logpostout = out
                    fluxout[:, :, vsini_id, :] = _fluxout
                    dAICout[:, vsini_id, :] = _dAICout
                    logpostout[:, vsini_id, :] = _logpostout

            if save:
                hdulist = pyfits.HDUList()
                hdulist.append(pyfits.PrimaryHDU(data=fluxout))
                hdulist.append(pyfits.ImageHDU(data=dAICout))
                hdulist.append(pyfits.ImageHDU(data=logpostout))
                hdulist.append(pyfits.ImageHDU(data=vsini_list))
                hdulist.append(pyfits.ImageHDU(data=rv_list))
                if combined:
                    if not os.path.exists(os.path.join(sciencedir, "out")):
                        os.makedirs(os.path.join(sciencedir, "out"))
                    out = os.path.join(sciencedir, "out",
                                       "flux_and_posterior.fits")
                else:
                    if not os.path.exists(
                            os.path.join(os.path.dirname(sciencefilename),
                                         "out")):
                        os.makedirs(
                            os.path.join(os.path.dirname(sciencefilename),
                                         "out"))

Example #13
  data[i3:i4,i3:i4] = data[i3:i4,i3:i4] * wfec
  
  # FFT
  ft = pyfftw.interfaces.numpy_fft.fft2(data)
  fts = np.fft.fftshift(ft)

  # Accumulate the result, weighted by the number of incident photons (electrons).
  i1 = int(N/2-N0/2)
  i2 = int(N/2+N0/2)
  NPm = (NP[i] + NP[i+1])/2
  image = image + NPm*(fts.real[i1:i2,i1:i2]**2 + fts.imag[i1:i2,i1:i2]**2)
 
# Finally, normalize so the values are in electrons per second.
s = np.sum(image)
image = image/s * Ntot * Stel
# Save 
hdu = fits.PrimaryHDU(image)
hdu.header['NTOT'] = Ntot  # total flux/m2/sec

hdu.header['APTFILE'] = ahdr['APTFILE']     # telescope.json
hdu.header['EPD']     = ahdr['EPD']         # pupil diameter (mm)
hdu.header['COBS']    = ahdr['COBS']        # central obscuration ratio 
hdu.header['STYPE']   = ahdr['STYPE']       # Spider type
hdu.header['TSP']     = ahdr['TSP']         # spider thickness (mm)
hdu.header['STEL']    = ahdr['STEL']        # Area of telescope aperture (m2)
hdu.header['SPRES']   = args['-s']          # spectral response
hdu.header['M'] = M   # Number of FFT cells per wavelength in um

hdulist = fits.HDUList([hdu])
hdulist.writeto(args['-p'],overwrite=True)
Example #14
    pixel_scale = hdu[0].header['PIXSCALE']  # arcsec/pix
    flux__xl = hdu['SCI'].data / flux_cal
    eflux__xl = np.sqrt(hdu['VAR'].data) / flux_cal
    flux_rf__xl = flux__xl * (1. + z)
    eflux_rf__xl = eflux__xl * (1. + z)
    SN_rf__xl = flux_rf__xl / eflux_rf__xl
    cpix = flux_rf__xl.sum(axis=1).argmax()
    flag__xl = hdu['DQ'].data
    dist__x = np.abs([(i - cpix) * pixel_scale for i in range(Npix)])
    iS = np.argsort(dist__x)
    for i in range(Npix):
        i_spectra = iS[i]
        print(i, iS[i])
        with open('spectra/ngc3081_545_spec%04d_%04d.txt' % (i, i_spectra),
                  'w') as f:
            for i_l in range(Nl):
                f.write(
                    '%.1f    %e    %e    %e\n' %
                    (l_rf[i_l], flux_rf__xl[i_spectra, i_l],
                     eflux_rf__xl[i_spectra, i_l], flag__xl[i_spectra, i_l]))

    if save_fits:
        hduout = fits.HDUList()
        hduout.append(fits.PrimaryHDU(header=hdu[0].header))
        hduout.append(fits.ImageHDU(data=wl, name='L_RF'))
        hduout.append(fits.ImageHDU(data=flux_rf__xl, name='F_RF__XL'))
        hduout.append(fits.ImageHDU(data=eflux_rf__xl, name='ERR_F_RF__XL'))
        hduout.append(fits.ImageHDU(data=flag__xl, name='FLAG__XL'))
        hduout.append(fits.ImageHDU(data=dist__x, name='DIST__X'))
        hduout.writeto('ngc3081_545_vardq_extract_spectra.fits', overwrite=True)
Example #15
fileout = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA.fits'
fileout_thin = 'Tb_reso_' + str(reso) + 'km.s-1_' + "Tmin_" + str(Tk_lim_inf) + "_Tmax_" + str(Tk_lim_sup) + '_ROHSA_thin.fits'

# Write PPV cube
hdu0 = fits.PrimaryHDU(Tb)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb.shape[1]
hdu0.header['NAXIS2'] = Tb.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout, overwrite=True)
    
# Write PPV cube thin limit
hdu0 = fits.PrimaryHDU(Tb_thin)
hdu0.header['COMMENT'] = 'Brightness Temperature Tb'
hdu0.header['NAXIS'] = 3
hdu0.header['NAXIS1'] = Tb_thin.shape[1]
hdu0.header['NAXIS2'] = Tb_thin.shape[2]
hdu0.header['NAXIS3'] = len(u)
hdu0.header['CTYPE3'] = 'v [km.s-1]'
hdu0.header['CRVAL3'] = u[40]
hdu0.header['CDELT3'] = reso
hdu0.header['CRPIX3'] = 40
hdu0.header['BUNIT'] = 'K'
hdulist = fits.HDUList([hdu0])
hdulist.writeto(path_out + fileout_thin, overwrite=True)
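
# For reference, the CRVAL3/CDELT3/CRPIX3 keywords written above fully define
# the velocity axis: v(k) = CRVAL3 + (k - CRPIX3) * CDELT3 for FITS channel k.
# A small sketch reconstructing such an axis (example values, not the data
# above; 'reso' plays the role of CDELT3):
import numpy as np

crval3, cdelt3, crpix3 = -10.0, 0.8, 40
k = np.arange(1, 101)                       # FITS channels are 1-indexed
v = crval3 + (k - crpix3) * cdelt3          # velocity in km/s per channel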
Example #16
def convert_tscube_old(infile, outfile):
    """Convert between old and new TSCube formats."""
    inhdulist = fits.open(infile)

    # If already in the new-style format just write and exit
    if 'DLOGLIKE_SCAN' in inhdulist['SCANDATA'].columns.names:
        if infile != outfile:
            inhdulist.writeto(outfile, overwrite=True)
        return

    # Get stuff out of the input file
    nrows = inhdulist['SCANDATA']._nrows
    nebins = inhdulist['EBOUNDS']._nrows
    npts = inhdulist['SCANDATA'].data.field('NORMSCAN').shape[1] // nebins

    emin = inhdulist['EBOUNDS'].data.field('e_min') / 1E3
    emax = inhdulist['EBOUNDS'].data.field('e_max') / 1E3
    eref = np.sqrt(emin * emax)
    dnde_emin = inhdulist['EBOUNDS'].data.field('E_MIN_FL')
    dnde_emax = inhdulist['EBOUNDS'].data.field('E_MAX_FL')
    index = np.log(dnde_emin / dnde_emax) / np.log(emin / emax)

    flux = PowerLaw.eval_flux(emin, emax, [dnde_emin, index], emin)
    eflux = PowerLaw.eval_eflux(emin, emax, [dnde_emin, index], emin)
    dnde = PowerLaw.eval_dnde(np.sqrt(emin * emax), [dnde_emin, index], emin)

    ts_map = inhdulist['PRIMARY'].data.reshape((nrows))
    ok_map = inhdulist['TSMAP_OK'].data.reshape((nrows))
    n_map = inhdulist['N_MAP'].data.reshape((nrows))
    errp_map = inhdulist['ERRP_MAP'].data.reshape((nrows))
    errn_map = inhdulist['ERRN_MAP'].data.reshape((nrows))
    err_map = np.ndarray((nrows))
    m = errn_map > 0
    err_map[m] = 0.5 * (errp_map[m] + errn_map[m])
    err_map[~m] = errp_map[~m]
    ul_map = n_map + 2.0 * errp_map

    ncube = np.rollaxis(inhdulist['N_CUBE'].data, 0, 3).reshape(
        (nrows, nebins))
    errpcube = np.rollaxis(inhdulist['ERRPCUBE'].data, 0, 3).reshape(
        (nrows, nebins))
    errncube = np.rollaxis(inhdulist['ERRNCUBE'].data, 0, 3).reshape(
        (nrows, nebins))
    tscube = np.rollaxis(inhdulist['TSCUBE'].data, 0, 3).reshape(
        (nrows, nebins))
    nll_cube = np.rollaxis(inhdulist['NLL_CUBE'].data, 0, 3).reshape(
        (nrows, nebins))
    ok_cube = np.rollaxis(inhdulist['TSCUBE_OK'].data, 0, 3).reshape(
        (nrows, nebins))

    ul_cube = ncube + 2.0 * errpcube
    m = errncube > 0
    errcube = np.ndarray((nrows, nebins))
    errcube[m] = 0.5 * (errpcube[m] + errncube[m])
    errcube[~m] = errpcube[~m]

    norm_scan = inhdulist['SCANDATA'].data.field('NORMSCAN').reshape(
        (nrows, npts, nebins)).swapaxes(1, 2)
    nll_scan = inhdulist['SCANDATA'].data.field('NLL_SCAN').reshape(
        (nrows, npts, nebins)).swapaxes(1, 2)

    # Adjust the "EBOUNDS" hdu
    columns = inhdulist['EBOUNDS'].columns
    columns.add_col(
        fits.Column(name=str('e_ref'),
                    format='E',
                    array=eref * 1E3,
                    unit='keV'))
    columns.add_col(
        fits.Column(name=str('ref_flux'),
                    format='D',
                    array=flux,
                    unit='ph / (cm2 s)'))
    columns.add_col(
        fits.Column(name=str('ref_eflux'),
                    format='D',
                    array=eflux,
                    unit='MeV / (cm2 s)'))
    columns.add_col(
        fits.Column(name=str('ref_dnde'),
                    format='D',
                    array=dnde,
                    unit='ph / (MeV cm2 s)'))

    columns.change_name('E_MIN_FL', str('ref_dnde_e_min'))
    columns.change_unit('ref_dnde_e_min', 'ph / (MeV cm2 s)')
    columns.change_name('E_MAX_FL', str('ref_dnde_e_max'))
    columns.change_unit('ref_dnde_e_max', 'ph / (MeV cm2 s)')
    columns.change_name('NPRED', str('ref_npred'))

    hdu_e = fits.BinTableHDU.from_columns(columns, name='EBOUNDS')

    # Make the "FITDATA" hdu
    columns = fits.ColDefs([])

    columns.add_col(fits.Column(name=str('fit_ts'), format='E', array=ts_map))
    columns.add_col(
        fits.Column(name=str('fit_status'), format='E', array=ok_map))
    columns.add_col(fits.Column(name=str('fit_norm'), format='E', array=n_map))
    columns.add_col(
        fits.Column(name=str('fit_norm_err'), format='E', array=err_map))
    columns.add_col(
        fits.Column(name=str('fit_norm_errp'), format='E', array=errp_map))
    columns.add_col(
        fits.Column(name=str('fit_norm_errn'), format='E', array=errn_map))
    hdu_f = fits.BinTableHDU.from_columns(columns, name='FITDATA')

    # Make the "SCANDATA" hdu
    columns = fits.ColDefs([])

    columns.add_col(
        fits.Column(name=str('ts'),
                    format='%iE' % nebins,
                    array=tscube,
                    dim=str('(%i)' % nebins)))

    columns.add_col(
        fits.Column(name=str('bin_status'),
                    format='%iE' % nebins,
                    array=ok_cube,
                    dim=str('(%i)' % nebins)))

    columns.add_col(
        fits.Column(name=str('norm'),
                    format='%iE' % nebins,
                    array=ncube,
                    dim=str('(%i)' % nebins)))

    columns.add_col(
        fits.Column(name=str('norm_ul'),
                    format='%iE' % nebins,
                    array=ul_cube,
                    dim=str('(%i)' % nebins)))

    columns.add_col(
        fits.Column(name=str('norm_err'),
                    format='%iE' % nebins,
                    array=errcube,
                    dim=str('(%i)' % nebins)))

    columns.add_col(
        fits.Column(name=str('norm_errp'),
                    format='%iE' % nebins,
                    array=errpcube,
                    dim=str('(%i)' % nebins)))

    columns.add_col(
        fits.Column(name=str('norm_errn'),
                    format='%iE' % nebins,
                    array=errncube,
                    dim=str('(%i)' % nebins)))

    columns.add_col(
        fits.Column(name=str('loglike'),
                    format='%iE' % nebins,
                    array=nll_cube,
                    dim=str('(%i)' % nebins)))

    columns.add_col(
        fits.Column(name=str('norm_scan'),
                    format='%iE' % (nebins * npts),
                    array=norm_scan,
                    dim=str('(%i,%i)' % (npts, nebins))))

    columns.add_col(
        fits.Column(name=str('dloglike_scan'),
                    format='%iE' % (nebins * npts),
                    array=nll_scan,
                    dim=str('(%i,%i)' % (npts, nebins))))

    hdu_s = fits.BinTableHDU.from_columns(columns, name='SCANDATA')

    hdulist = fits.HDUList(
        [inhdulist[0], hdu_s, hdu_f, inhdulist["BASELINE"], hdu_e])

    hdulist['SCANDATA'].header['UL_CONF'] = 0.95

    hdulist.writeto(outfile, overwrite=True)

    return hdulist
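
# Aside: np.rollaxis(cube, 0, 3) above moves the energy axis last before the
# spatial axes are flattened into rows; the modern spelling uses np.moveaxis
# (synthetic shapes, a sketch):
import numpy as np

cube = np.zeros((5, 4, 3))                         # (nebins, ny, nx)
flat = np.moveaxis(cube, 0, -1).reshape(4 * 3, 5)  # -> (nrows, nebins)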
Example #17
def modelSpec(lib='GK',
              teff=4500,
              logg=2.5,
              metals=0.,
              cfe=0.,
              nfe=0.,
              afe=0.,
              vmicro=2.,
              dr=None,
              rmHDU1=True,
              rmHDU2=True):
    """
    NAME:
       modelSpec
    PURPOSE:
       download a model spectrum file
    INPUT:
       lib= ('GK') spectral library
       teff= (4500) grid-point Teff
       logg= (2.5) grid-point logg
       metals= (0.) grid-point metallicity
       cfe= (0.) grid-point carbon-enhancement
       nfe= (0.) grid-point nitrogen-enhancement
       afe= (0.) grid-point alpha-enhancement
       vmicro= (2.) grid-point microturbulence
       dr= return the path corresponding to this data release
       rmHDU1= (True) if True, rm the first (v. large) HDU with the high-resolution model spectrum
       rmHDU2= (True) if True, rm the second (quite large) HDU with the model spectrum convolved with the LSF
    OUTPUT:
       (none; just downloads)
    HISTORY:
       2015-01-20 - Written - Bovy (IAS)
    """
    if dr is None: dr = path._default_dr()
    # First make sure the file doesn't exist
    filePath = path.modelSpecPath(lib=lib,
                                  teff=teff,
                                  logg=logg,
                                  metals=metals,
                                  cfe=cfe,
                                  nfe=nfe,
                                  afe=afe,
                                  vmicro=vmicro,
                                  dr=dr)
    if os.path.exists(filePath): return None
    # Create the file path
    downloadPath = filePath.replace(
        os.path.join(path._APOGEE_DATA, _dr_string(dr)), _base_url(dr=dr))
    _download_file(downloadPath, filePath, dr, verbose=True)
    # Post-processing of the file, removing the big first HDU or the first two for local storage
    if rmHDU1 or rmHDU2:
        # Open the file, need to use astropy's fits reader, bc the file has issues
        import astropy.io.fits as apyfits
        from astropy.utils.exceptions import AstropyUserWarning
        import warnings
        warnings.filterwarnings('ignore', category=AstropyUserWarning)
        hdulist = apyfits.open(filePath)
        if rmHDU1:
            hdu = apyfits.PrimaryHDU(numpy.zeros((2, 2)))
        else:
            hdu = hdulist[0].copy()
        inp = [hdu]
        if rmHDU2:
            hdu2 = apyfits.ImageHDU(numpy.zeros((2, 2)))
        else:
            hdu2 = hdulist[1].copy()
        inp.append(hdu2)
        inp.extend(hdulist[2:])
        newHdulist = apyfits.HDUList(inp)
        # Fix any issues in the headers
        for ii in range(5):
            if '[A/M]' in newHdulist[ii].header:
                newHdulist[ii].header['AM'] = newHdulist[ii].header.pop(
                    '[A/M]', None)
            if '[C/M]' in newHdulist[ii].header:
                newHdulist[ii].header['CM'] = newHdulist[ii].header.pop(
                    '[C/M]', None)
            if '[N/M]' in newHdulist[ii].header:
                newHdulist[ii].header['NM'] = newHdulist[ii].header.pop(
                    '[N/M]', None)
        # Overwrite file
        newHdulist.writeto(filePath, overwrite=True, output_verify='silentfix')
    return None
Example #18
    def _build_tpf(self,
                   cube_fits,
                   img_cube,
                   uncert_cube,
                   cutout_wcs_dict,
                   aperture,
                   verbose=True):
        """
        Building the cutout target pixel file (TPF) and formatting it to match TESS pipeline TPFs.

        Parameters
        ----------
        cube_fits : `~astropy.io.fits.hdu.hdulist.HDUList`
            The cube hdu list.
        img_cube : `numpy.array`
            The untransposed image cutout array
        uncert_cube : `numpy.array`
            The untransposed uncertainty cutout array
        cutout_wcs_dict : dict
            Dictionary of wcs keyword/value pairs to be added to each array 
            column in the cutout table header.
        aperture : `numpy.array`
            The aperture array (an array the size of a single cutout 
            that is 1 where there is image data and 0 where there isn't)        
        verbose : bool
            Optional. If True, intermediate information is printed.

        Returns
        -------
        response :  `~astropy.io.fits.HDUList`
            Target pixel file HDU list
        """

        # The primary hdu is just the main header, which is the same
        # as the one on the cube file
        primary_hdu = cube_fits[0]
        self._update_primary_header(primary_hdu.header)

        cols = list()

        # Adding the cutouts
        tform = str(img_cube[0].size) + "E"
        dims = str(img_cube[0].shape[::-1])
        empty_arr = np.zeros(img_cube.shape)

        # Adding the Time relates columns
        cols.append(
            fits.Column(name='TIME',
                        format='D',
                        unit='BJD - 2457000, days',
                        disp='D14.7',
                        array=(cube_fits[2].columns['TSTART'].array +
                               cube_fits[2].columns['TSTOP'].array) / 2))

        cols.append(
            fits.Column(name='TIMECORR',
                        format='E',
                        unit='d',
                        disp='E14.7',
                        array=cube_fits[2].columns['BARYCORR'].array))

        # Adding CADENCENO as zeros b/c we don't have this info
        cols.append(
            fits.Column(name='CADENCENO',
                        format='J',
                        disp='I10',
                        array=empty_arr[:, 0, 0]))

        # Adding counts (-1 b/c we don't have data)
        cols.append(
            fits.Column(name='RAW_CNTS',
                        format=tform.replace('E', 'J'),
                        unit='count',
                        dim=dims,
                        disp='I8',
                        array=empty_arr - 1,
                        null=-1))

        # Adding flux and flux_err (data we actually have!)
        cols.append(
            fits.Column(name='FLUX',
                        format=tform,
                        dim=dims,
                        unit='e-/s',
                        disp='E14.7',
                        array=img_cube))
        cols.append(
            fits.Column(name='FLUX_ERR',
                        format=tform,
                        dim=dims,
                        unit='e-/s',
                        disp='E14.7',
                        array=uncert_cube))

        # Adding the background info (zeros b.c we don't have this info)
        cols.append(
            fits.Column(name='FLUX_BKG',
                        format=tform,
                        dim=dims,
                        unit='e-/s',
                        disp='E14.7',
                        array=empty_arr))
        cols.append(
            fits.Column(name='FLUX_BKG_ERR',
                        format=tform,
                        dim=dims,
                        unit='e-/s',
                        disp='E14.7',
                        array=empty_arr))

        # Adding the quality flags
        cols.append(
            fits.Column(name='QUALITY',
                        format='J',
                        disp='B16.16',
                        array=cube_fits[2].columns['DQUALITY'].array))

        # Adding the position correction info (zeros b.c we don't have this info)
        cols.append(
            fits.Column(name='POS_CORR1',
                        format='E',
                        unit='pixel',
                        disp='E14.7',
                        array=empty_arr[:, 0, 0]))
        cols.append(
            fits.Column(name='POS_CORR2',
                        format='E',
                        unit='pixel',
                        disp='E14.7',
                        array=empty_arr[:, 0, 0]))

        # Adding the FFI_FILE column (not in the pipeline tpfs)
        cols.append(
            fits.Column(name='FFI_FILE',
                        format='38A',
                        unit='pixel',
                        array=cube_fits[2].columns['FFI_FILE'].array))

        # making the table HDU
        table_hdu = fits.BinTableHDU.from_columns(cols)
        table_hdu.header['EXTNAME'] = 'PIXELS'
        table_hdu.header['INHERIT'] = True

        # Adding the wcs keywords to the columns and removing from the header
        self._add_column_wcs(table_hdu.header, cutout_wcs_dict)

        # Adding the extra image keywords
        self._add_img_kwds(table_hdu.header)

        # Building the aperture HDU
        aperture_hdu = fits.ImageHDU(data=aperture)
        aperture_hdu.header['EXTNAME'] = 'APERTURE'
        self._add_aperture_wcs(aperture_hdu.header, cube_fits[2].header)
        aperture_hdu.header['INHERIT'] = True

        cutout_hdu_list = fits.HDUList([primary_hdu, table_hdu, aperture_hdu])

        self._apply_header_inherit(cutout_hdu_list)

        return cutout_hdu_list
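
# The dim= keyword used for the cutout columns above stores a 2-D image in
# every table row; the same technique in isolation (shapes are synthetic;
# note TDIM is given in FITS order, i.e. reversed relative to numpy):
import numpy as np
from astropy.io import fits

cube = np.zeros((10, 3, 4), dtype=np.float32)    # 10 rows of 3x4 images
col = fits.Column(name='FLUX', format='12E', dim='(4,3)', array=cube)
tbl = fits.BinTableHDU.from_columns([col])       # one image per row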
def average_adjacent_obs(obslist, tharlist, folder):

    obslist_jd = array(obslist[1])

    for i in range(len(tharlist[0]) - 1):
        obsindx = []
        objname = []
        for j in range(len(obslist[1])):
            fitsname = os.path.basename(obslist[0][j])
            print(fitsname)

            ### return spectra taken between these two thars

            if obslist[1][j] > tharlist[1][i] and obslist[1][j] < tharlist[1][
                    i + 1] and os.path.exists(folder + "/temp/" + fitsname +
                                              ".spec.pkl"):
                print('in the loop', j)
                objname.append(pyfits.getheader(obslist[0][j])["OBJECT"])
                obsindx.append(j)

        print('obsindx', obsindx)

        if len(obsindx) > 0:

            objectlist = unique(objname)
            print(objectlist)

            for obj in objectlist:

                spectrum_list = []
                background_list = []
                thar_list = []
                spectrum_noflat_list = []
                background_noflat_list = []
                header_list = []

                for i in range(len(
                        objname)):  ### read in all the spectra of that object
                    if objname[i] == obj:
                        fitsname = os.path.basename(obslist[0][obsindx[i]])
                        spec = pickle.load(
                            open(folder + "/temp/" + fitsname + ".spec.pkl",
                                 "rb"))
                        header_list.append(
                            pyfits.getheader(obslist[0][obsindx[i]]))

                        spectrum_list.append(spec[0])
                        background_list.append(spec[1])
                        thar_list.append(spec[2])
                        spectrum_noflat_list.append(spec[3])
                        background_noflat_list.append(spec[4])

                header, fitsTIME = combine_header(header_list)
                print(obj, len(spectrum_list))

                if len(spectrum_list) == 1:
                    spectrum_master = spectrum_list[0]
                    background_master = background_list[0]
                    thar_master = thar_list[0]
                    spectrum_noflat_master = spectrum_noflat_list[0]
                    background_noflat_master = background_noflat_list[0]

                else:
                    spectrum_master = []
                    background_master = []
                    thar_master = []
                    spectrum_noflat_master = []
                    background_noflat_master = []
                    for order in range(len(spectrum_list[0])):

                        order_specs = []
                        for s in background_list:
                            order_specs.append(s[order])
                            #plt.plot(s[order])
                            #plt.show()
                        bk_summed = sum(array(order_specs), axis=0)
                        background_master.append(bk_summed)

                        order_specs = []
                        for s in spectrum_list:
                            order_specs.append(s[order])

                        spec_summed = combine_order(array(order_specs))
                        spectrum_master.append(spec_summed)  #-bk_summed)

                        order_specs = []
                        for s in thar_list:
                            order_specs.append(s[order])
                        spec_summed = mean(array(order_specs), axis=0)
                        thar_master.append(spec_summed)

                        order_specs = []
                        for s in background_noflat_list:
                            order_specs.append(s[order])
                        bk_summed = sum(array(order_specs), axis=0)
                        background_noflat_master.append(bk_summed)

                        order_specs = []
                        for s in spectrum_noflat_list:
                            order_specs.append(s[order])

                        spec_summed = combine_order(array(order_specs))
                        spectrum_noflat_master.append(
                            spec_summed)  #-bk_summed)

                ### perform checks on spectrum for nan and infs
                def nanchecks(spec):
                    for i in range(len(spec)):
                        mask = spec[i] != spec[i]
                        mask += abs(spec[i]) == inf

                        try:
                            print i, len(spec[i]), spec[i], mask, ~any(mask)
                            indx = arange(len(spec[i]))

                            if sum(mask) > 0 and sum(mask) < len(spec[i]):
                                print 'sum(mask)', sum(mask)
                                for j in indx[mask]:
                                    adjacent_indx = abs(indx - j)
                                    adjacent_indx_mask = adjacent_indx > 0
                                    adjacent_indx_mask *= adjacent_indx < min(
                                        adjacent_indx[invert(
                                            mask)]) + 10  # this line crashes
                                    fixval = nanmean(
                                        spec[i][adjacent_indx_mask])
                                    if fixval != fixval or abs(fixval) == inf:
                                        fixval = nanmedian(spec[i])
                                    spec[i][j] = fixval
                            elif sum(mask) == len(spec[i]):
                                print 'sum(mask)', sum(mask), 'setting to 0'
                                spec[i] = zeros(len(spec[i]))
                        except:
                            print 'len0', 'setting to 0'
                            spec[i] = zeros(2042)  # HARDCODED
                        print

                    return spec

                spectrum_master = nanchecks(spectrum_master)
                background_master = nanchecks(background_master)
                thar_master = nanchecks(thar_master)
                spectrum_noflat_master = nanchecks(spectrum_noflat_master)
                background_noflat_master = nanchecks(background_noflat_master)

                hduspec = pyfits.PrimaryHDU(array(spectrum_master),
                                            header=header)
                hdubk = pyfits.ImageHDU(array(background_master))
                hduthar = pyfits.ImageHDU(array(thar_master))
                hduspec_nf = pyfits.ImageHDU(array(spectrum_noflat_master))
                hdubk_nf = pyfits.ImageHDU(array(background_noflat_master))
                hdulist = pyfits.HDUList(
                    [hduspec, hdubk, hduthar, hduspec_nf, hdubk_nf])

                output_name = folder + "/temp/" + "ANU23e_" + obj + "_" + fitsTIME + ".fits"
                hdulist.writeto(output_name, overwrite=True)

    print('DONE creating fits files.')
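
# The nanchecks helper above patches isolated NaN/Inf samples with a local
# mean and falls back to the order median; a standalone numpy sketch of the
# same repair idea (toy data, not the pipeline's exact logic):
import numpy as np

def repair_bad_samples(order, window=10):
    """Replace NaN/Inf samples with the mean of nearby good samples."""
    order = np.asarray(order, dtype=float)
    bad = ~np.isfinite(order)
    if bad.all():
        return np.zeros_like(order)  # nothing usable: zero out the order
    idx = np.arange(order.size)
    for j in idx[bad]:
        near = (np.abs(idx - j) > 0) & (np.abs(idx - j) <= window) & ~bad
        order[j] = order[near].mean() if near.any() else np.median(order[~bad])
    return order

print(repair_bad_samples([1.0, np.nan, 3.0, np.inf, 5.0]))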
    f_date_iso = f_date_start.strftime('%Y-%m-%dT%H:%M:%S.%f')

    hdu_dark = fits.PrimaryHDU(dark * s1c_spcal_day_coef)
    hdu_dark.header['DATE-OBS'] = f_date_iso
    hdu_dark.header['BITPIX'] = -64
    hdu_dark.header['EXPTIME'] = f_exp
    hdu_dark.header['PROJ-TYP'] = 'TAN'
    hdu_dark.header['AMCAL-A0'] = az0
    hdu_dark.header['AMCAL-H0'] = alt0
    hdu_dark.header['AMCAL-A'] = str(a)
    hdu_dark.header['AMCAL-B'] = str(b)
    hdu_dark.header['AMCAL-C'] = str(c)
    hdu_dark.header['AMCAL-D'] = str(d)
    hdu_dark.header['MED-WID1'] = avr_width1
    hdulist_dark = fits.HDUList([hdu_dark])
    hdulist_dark.writeto(spath + fn.split('/')[-1].split('.')[-2] + "_dark.fit",
                         overwrite=True)

    hdulist = fits.open(fn, ignore_missing_end=True)
    img = hdulist[0].data.astype('float')
    #~ img = ss.medfilt((img - masterdark.astype('float')) / masterflat.astype('float') - dark, kernel_size=avr_width2) * s1c_spcal_day_coef
    img = ss.medfilt((img - masterdark.astype('float')) / masterflat.astype('float')
                     - dark, kernel_size=avr_width2) * 2.20  # 2.20 hard-coded in place of s1c_spcal_day_coef

    hdu_light = fits.PrimaryHDU(img)
    hdu_light.header['DATE-OBS'] = f_date_iso
    hdu_light.header['BITPIX'] = -64
    hdu_light.header['EXPTIME'] = f_exp
    hdu_light.header['PROJ-TYP'] = 'TAN'
    hdu_light.header['AMCAL-A0'] = az0
    hdu_light.header['AMCAL-H0'] = alt0
    hdu_light.header['AMCAL-A'] = str(a)
Beispiel #21
    ('PHS_OFFS', 0.0, 'Phase offset of bin 0 for gated data'))
table_hdu.header.append(
    ('NBITS', 8, 'Nr of bits/datum (SEARCH mode "X" data, else 1)'))
table_hdu.header.append(
    ('NSUBOFFS', nsuboffs, 'Subint offset (Contiguous SEARCH-mode files)'))
table_hdu.header.append(
    ('NCHAN', chnum / Fdsamp, 'Number of channels/sub-bands in this file'))
table_hdu.header.append(
    ('CHAN_BW', chan_bw * Fdsamp * 1.0, '[MHz] Channel/sub-band width'))
table_hdu.header.append(
    ('NCHNOFFS', 0, 'Channel/sub-band offset for split files'))
table_hdu.header.append(('NSBLK', nsblk, 'Samples/row (SEARCH mode, else 1)'))
table_hdu.header.append(
    ('extname', 'subint  ', 'name of this binary table extension'))

hdulist2 = pyfits.HDUList([hdu0, table_hdu])
outname1 = fileroot + '_4pols_' + sys.argv[1] + '_' + sys.argv[
    2] + '_' + sys.argv[3] + '_' + sys.argv[4] + '.fits'

print "++++++++++++++++++++++++++++++++++++++++++++++=="
print "out File header:"
hdutmp = hdulist2[1]
datatmp = hdutmp.data
float_datatmp = np.array(datatmp['DATA'])
print float_datatmp.shape
print "++++++++++++++++++++++++++++++++++++++++++++++=="

hdulist2.writeto(outname1, overwrite=True)
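
# For reference, a minimal sketch of the (keyword, value, comment) append
# pattern used to build the SUBINT-style header above (standalone; column
# layout and values are made up):
import numpy as np
from astropy.io import fits

sketch_hdu = fits.BinTableHDU.from_columns([
    fits.Column(name='DATA', format='8B',
                array=np.zeros((4, 8), dtype=np.uint8))])
sketch_hdu.header.append(('NBITS', 8, 'Nr of bits/datum'))
sketch_hdu.header.append(('NSBLK', 1024, 'Samples/row (SEARCH mode, else 1)'))
sketch_hdu.header.append(('EXTNAME', 'SUBINT', 'name of this binary table extension'))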
Beispiel #22
    def to_fits(self):
        primary_hdu = fits.PrimaryHDU()
        image_hdu = fits.ImageHDU(self.data, self.header)
        hdulist = fits.HDUList([primary_hdu, image_hdu])
        return hdulist
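
# A hedged usage sketch; ImageHolder is a stand-in for whatever class
# defines to_fits() above:
import numpy as np
from astropy.io import fits

class ImageHolder:
    def __init__(self, data, header):
        self.data, self.header = data, header

    def to_fits(self):
        primary_hdu = fits.PrimaryHDU()
        image_hdu = fits.ImageHDU(self.data, self.header)
        return fits.HDUList([primary_hdu, image_hdu])

ImageHolder(np.zeros((4, 4)), fits.Header()).to_fits().writeto(
    'image.fits', overwrite=True)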
Beispiel #23
def write_to_fits(d, ofile, name=None, hdr=None, overwrite=False, checksum=True):
    """
    Write the provided object to a fits file.

    This is either a convenience wrapper for :func:`write_to_hdu`
    that adds a primary HDU and writes the result to the provided
    file, or a convenience wrapper for an already formed
    `astropy.io.fits.HDUList`_ passed as (``d``).

    If the provided file name includes the '.gz' extension, the file
    is first written using `astropy.io.fits.HDUList.writeto`_ and
    then compressed using :func:`compress_file`.
    
    .. note::

        Compressing the file is generally slow, but following the
        two-step process of running
        `astropy.io.fits.HDUList.writeto`_ and then
        :func:`compress_file` is generally faster than having
        `astropy.io.fits.HDUList.writeto`_ do the compression,
        particularly for files with many extensions (or at least this
        was true in the past).

    Args:
        d (:obj:`dict`, :obj:`list`, `numpy.ndarray`_, `astropy.table.Table`_, `astropy.io.fits.HDUList`_):
            Object to write to the HDU. See :func:`write_to_hdu`.
        ofile (:obj:`str`):
            File name (path) for the fits file.
        name (:obj:`str`, optional):
            Name for the extension with the data. If None, the
            extension is not given a name. However, if the input
            object is a dictionary, see :func:`dict_to_hdu` for how
            the name will overwrite any dictionary keyword associated
            with the data to write.
        hdr (`astropy.io.fits.Header`_, optional):
            Base-level header to use for *all* HDUs.
        overwrite (:obj:`bool`, optional):
            Overwrite any existing file.
        checksum (:obj:`bool`, optional):
            Passed to `astropy.io.fits.HDUList.writeto`_ to add the
            DATASUM and CHECKSUM keywords to the fits header(s).
    """
    if os.path.isfile(ofile) and not overwrite:
        raise FileExistsError('File already exists; to overwrite, set overwrite=True.')

    # Determine if the file should be compressed
    _ofile = ofile[:ofile.rfind('.')] if ofile.split('.')[-1] == 'gz' else ofile

    _hdr = initialize_header() if hdr is None else hdr.copy()

    # Construct the hdus and write the fits file.
    fits.HDUList(d if isinstance(d, fits.HDUList) else 
                 [fits.PrimaryHDU(header=_hdr)] + [write_to_hdu(d, name=name, hdr=_hdr)]
                 ).writeto(_ofile, overwrite=True, checksum=checksum)

    # Compress the file if the output filename has a '.gz' extension;
    # this is slow but still faster than if you have astropy.io.fits do
    # it directly
    # TODO: use pypmsgs?
    if _ofile != ofile:
        print('Compressing file: {0}'.format(_ofile))
        compress_file(_ofile, overwrite=True)
    print('File written to: {0}'.format(ofile))
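
# A possible call, assuming write_to_fits and its helpers (write_to_hdu,
# initialize_header, compress_file) are importable from the same module;
# the table content is made up:
from astropy.table import Table

tbl = Table({'wave': [1.0, 2.0, 3.0], 'flux': [0.1, 0.2, 0.3]})
# writes a PRIMARY HDU plus one named extension; an ofile ending in '.gz'
# would instead trigger the write-then-compress path described above
write_to_fits(tbl, 'spectrum.fits', name='SPEC', overwrite=True)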
Beispiel #24
def reduce_rawdata():
    """Reduce the Subaru/HDS spectra.
    """
    # read obslog and config
    config = load_config(r'HDS\S*\.cfg$')
    logtable = load_obslog(r'\S*\.obslog$', fmt='astropy')

    # extract keywords from config file
    section = config['data']
    rawpath = section.get('rawpath')

    section = config['reduce']
    midpath = section.get('midpath')
    odspath = section.get('odspath')
    figpath = section.get('figpath')
    mode = section.get('mode')
    fig_format = section.get('fig_format')
    oned_suffix = section.get('oned_suffix')
    ncores = section.get('ncores')

    # create folders if not exist
    if not os.path.exists(figpath): os.mkdir(figpath)
    if not os.path.exists(odspath): os.mkdir(odspath)
    if not os.path.exists(midpath): os.mkdir(midpath)

    # determine number of cores to be used
    if ncores == 'max':
        ncores = os.cpu_count()
    else:
        ncores = min(os.cpu_count(), int(ncores))

    ############ count different setups #############
    setup_lst = {}
    for logitem in logtable:
        setup = logitem['setup']
        objtype = logitem['objtype']
        binning = logitem['binning']
        if (setup, binning) not in setup_lst:
            setup_lst[(setup, binning)] = {}
        if objtype not in setup_lst[(setup, binning)]:
            setup_lst[(setup, binning)][objtype] = 0
        setup_lst[(setup, binning)][objtype] += 1

    object_setup_lst = []
    for (setup, binning), objtype_lst in sorted(setup_lst.items()):
        print('Setup: {} Binning: {}'.format(setup, binning))
        count_total = 0
        for objtype, count in sorted(objtype_lst.items()):
            print(' - {:10s}: {:3d} Frames'.format(objtype, count))
            count_total += count
            if objtype == 'OBJECT':
                object_setup_lst.append((setup, binning))
        print(' - {:10s}: {:3d} Frames'.format('Total', count_total))
    object_setup_lst = list(set(object_setup_lst))

    # loop over different setups and binnings
    for sel_setup, sel_binning in object_setup_lst:
        print('Selected setup={}; selected binning={}'.format(
            sel_setup, sel_binning))

        ############### parse bias #################
        bias_filter = lambda item: item['setup']==sel_setup \
                        and item['binning']==sel_binning \
                        and item['objtype']=='BIAS' \
                        and item['object']=='BIAS' \
                        and item['nsat_1']<100 \
                        and item['q95_1']<10000

        bias_file = config['reduce.bias'].get('bias_file')

        if mode == 'debug' and os.path.exists(bias_file):
            pass

        else:
            bias_data_lst1 = []
            bias_data_lst2 = []
            bias_card_lst = []

            logitem_lst = list(filter(bias_filter, logtable))

            # get the number of bias images
            n_bias = len(logitem_lst)

            if n_bias == 0:
                pass

            fmt_str = (
                '  - {:>5s} {:12s} {:12s} {:<7s} {:<7s} {:1s}I2 {:>7}'
                ' {:<7s} {:5}'  # setup, binning
                ' {:>7} {:>7} {:>5} {:>5}'  # nsat_1, nsat_2, q95_1, q95_2
            )
            head_str = fmt_str.format('FID', 'fileid1', 'fileid2', 'objtype',
                                      'object', '', 'exptime', 'setup',
                                      'binning', 'nsat_1', 'nsat_2', 'q95_1',
                                      'q95_2')
            print(head_str)
            for ifile, logitem in enumerate(logitem_lst):
                fname1 = '{}.fits'.format(logitem['fileid1'])
                fname2 = '{}.fits'.format(logitem['fileid2'])
                filename1 = os.path.join(rawpath, fname1)
                filename2 = os.path.join(rawpath, fname2)
                data1, head1 = fits.getdata(filename1, header=True)
                data2, head2 = fits.getdata(filename2, header=True)
                data1 = parse_image(data1, head1)
                data2 = parse_image(data2, head2)

                string = fmt_str.format('[{:d}]'.format(logitem['frameid']),
                                        logitem['fileid1'], logitem['fileid2'],
                                        logitem['objtype'], logitem['object'],
                                        logitem['i2'], logitem['exptime'],
                                        logitem['setup'], logitem['binning'],
                                        logitem['nsat_1'], logitem['nsat_2'],
                                        logitem['q95_1'], logitem['q95_2'])
                print(print_wrapper(string, logitem))

                bias_data_lst1.append(data1)
                bias_data_lst2.append(data2)

                # append the file information
                prefix = 'HIERARCH GAMSE BIAS FILE {:03d}'.format(ifile + 1)
                card = (prefix + ' FILEID1', logitem['fileid1'])
                bias_card_lst.append(card)
                card = (prefix + ' FILEID2', logitem['fileid2'])
                bias_card_lst.append(card)

            prefix = 'HIERARCH GAMSE BIAS '
            bias_card_lst.append((prefix + 'NFILE', n_bias))

            # combine bias images
            bias_data_lst1 = np.array(bias_data_lst1)
            bias_data_lst2 = np.array(bias_data_lst2)

            combine_mode = 'mean'
            cosmic_clip = section.getfloat('cosmic_clip')
            maxiter = section.getint('maxiter')
            maskmode = (None, 'max')[n_bias >= 3]  # use 'max' masking only with >= 3 frames

            bias_combine1 = combine_images(
                bias_data_lst1,
                mode=combine_mode,
                upper_clip=cosmic_clip,
                maxiter=maxiter,
                maskmode=maskmode,
                ncores=ncores,
            )
            bias_combine2 = combine_images(
                bias_data_lst2,
                mode=combine_mode,
                upper_clip=cosmic_clip,
                maxiter=maxiter,
                maskmode=maskmode,
                ncores=ncores,
            )

            bias_card_lst.append((prefix + 'COMBINE_MODE', combine_mode))
            bias_card_lst.append((prefix + 'COSMIC_CLIP', cosmic_clip))
            bias_card_lst.append((prefix + 'MAXITER', maxiter))
            bias_card_lst.append((prefix + 'MASK_MODE', str(maskmode)))

            # create the hdu list to be saved
            hdu_lst = fits.HDUList()
            # create new FITS Header for bias
            head = fits.Header()
            # pack new card list into header and bias_card_lst
            for card in bias_card_lst:
                head.append(card)
            head['HIERARCH GAMSE FILECONTENT 0'] = 'BIAS COMBINED'
            hdu_lst.append(fits.PrimaryHDU(data=bias_combine1, header=head))
            hdu_lst.append(fits.ImageHDU(data=bias_combine2, header=head))

            # write to FITS file
            hdu_lst.writeto(bias_file, overwrite=True)

            message = 'Bias image written to "{}"'.format(bias_file)
            logger.info(message)
            print(message)

        ############### find flat groups #################

        flat_file_str = config['reduce.flat'].get('flat_file')
        flat_file = flat_file_str.format(sel_setup, sel_binning)

        if mode == 'debug' and os.path.exists(flat_file):
            continue
            # pass
        else:
            filterfunc = lambda item: item['setup']==sel_setup \
                            and item['binning']==sel_binning \
                            and item['objtype']=='FLAT' \
                            and item['object']=='FLAT'
            logitem_lst = list(filter(filterfunc, logtable))

            fmt_str = (
                '  - {:>5s} {:12s} {:12s} {:<7s} {:<7s} {:1s}I2 {:>7}'
                ' {:<7s} {:5} {:8}'  # setup, binning, slitsize
                ' {:>7} {:>7} {:>5} {:>5}'  # nsat_1, nsat_2, q95_1, q95_2
            )
            head_str = fmt_str.format('FID', 'fileid1', 'fileid2', 'objtype',
                                      'object', '', 'exptime', 'setup',
                                      'binning', 'slitsize', 'nsat_1',
                                      'nsat_2', 'q95_1', 'q95_2')

            for logitem in logtable:
                objtype = logitem['objtype']
                objname = logitem['object']
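
# combine_images is GAMSE's own routine; a rough standalone sketch of an
# iterative upper-clipped mean combine in the same spirit (simplified; no
# saturation-mask handling):
import numpy as np

def clipped_mean_combine(stack, upper_clip=10.0, maxiter=5):
    """Mean-combine images, iteratively rejecting high outliers (cosmics)."""
    work = np.asarray(stack, dtype=float).copy()
    for _ in range(maxiter):
        mean = np.nanmean(work, axis=0)
        std = np.nanstd(work, axis=0)
        bad = work > mean + upper_clip * std
        if not bad.any():
            break
        work[bad] = np.nan  # reject pixels far above the stack mean
    return np.nanmean(work, axis=0)

print(clipped_mean_combine(np.random.normal(100.0, 5.0, size=(5, 16, 16))).shape)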
Beispiel #25
    def write_uvfits(self,
                     filename,
                     spoof_nonessential=False,
                     write_lst=True,
                     force_phase=False,
                     run_check=True,
                     check_extra=True,
                     run_check_acceptability=True):
        """
        Write the data to a uvfits file.

        Parameters
        ----------
        filename : str
            The uvfits file to write to.
        spoof_nonessential : bool
            Option to spoof the values of optional UVParameters that are not set
            but are required for uvfits files.
        write_lst : bool
            Option to write the LSTs to the metadata (random group parameters).
        force_phase : bool
            Option to automatically phase drift scan data to zenith of the first
            timestamp.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            before writing the file.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters before
            writing the file.
        """
        if run_check:
            self.check(check_extra=check_extra,
                       run_check_acceptability=run_check_acceptability)

        if self.phase_type == 'phased':
            pass
        elif self.phase_type == 'drift':
            if force_phase:
                print('The data are in drift mode and do not have a '
                      'defined phase center. Phasing to zenith of the first '
                      'timestamp.')
                phase_time = Time(self.time_array[0], format='jd')
                self.phase_to_time(phase_time)
            else:
                raise ValueError('The data are in drift mode. '
                                 'Set force_phase to true to phase the data '
                                 'to zenith of the first timestamp before '
                                 'writing a uvfits file.')
        else:
            raise ValueError('The phasing type of the data is unknown. '
                             'Set the phase_type to drift or phased to '
                             'reflect the phasing status of the data')

        if self.Nfreqs > 1:
            freq_spacing = self.freq_array[0, 1:] - self.freq_array[0, :-1]
            if not np.isclose(np.min(freq_spacing),
                              np.max(freq_spacing),
                              rtol=self._freq_array.tols[0],
                              atol=self._freq_array.tols[1]):
                raise ValueError(
                    'The frequencies are not evenly spaced (probably '
                    'because of a select operation). The uvfits format '
                    'does not support unevenly spaced frequencies.')
            if not np.isclose(freq_spacing[0],
                              self.channel_width,
                              rtol=self._freq_array.tols[0],
                              atol=self._freq_array.tols[1]):
                raise ValueError(
                    'The frequencies are separated by more than their '
                    'channel width (probably because of a select operation). '
                    'The uvfits format does not support frequencies '
                    'that are spaced by more than their channel width.')
            freq_spacing = freq_spacing[0]
        else:
            freq_spacing = self.channel_width

        if self.Npols > 1:
            pol_spacing = np.diff(self.polarization_array)
            if np.min(pol_spacing) < np.max(pol_spacing):
                raise ValueError(
                    'The polarization values are not evenly spaced (probably '
                    'because of a select operation). The uvfits format '
                    'does not support unevenly spaced polarizations.')
            pol_spacing = pol_spacing[0]
        else:
            pol_spacing = 1

        for p in self.extra():
            param = getattr(self, p)
            if param.name in self.uvfits_required_extra:
                if param.value is None:
                    if spoof_nonessential:
                        # spoof extra keywords required for uvfits
                        if isinstance(param, uvp.AntPositionParameter):
                            param.apply_spoof(self, 'Nants_telescope')
                        else:
                            param.apply_spoof()
                        setattr(self, p, param)
                    else:
                        raise ValueError(
                            'Required attribute {attribute} '
                            'for uvfits not defined. Define or '
                            'set spoof_nonessential to True to '
                            'spoof this attribute.'.format(attribute=p))

        # check for unflagged data with nsample = 0. Warn if any found
        wh_nsample0 = np.where(self.nsample_array == 0)
        if np.any(~self.flag_array[wh_nsample0]):
            warnings.warn('Some unflagged data has nsample = 0. Flags and '
                          'nsamples are combined in uvfits files such that '
                          'these data will appear to be flagged.')

        weights_array = self.nsample_array * \
            np.where(self.flag_array, -1, 1)
        # FITS uvw direction convention is opposite ours and Miriad's.
        # So conjugate the visibilities and flip the uvws:
        data_array = np.conj(self.data_array[:, np.newaxis,
                                             np.newaxis, :, :, :, np.newaxis])
        weights_array = weights_array[:, np.newaxis, np.newaxis, :, :, :,
                                      np.newaxis]
        # uvfits_array_data shape will be  (Nblts,1,1,[Nspws],Nfreqs,Npols,3)
        uvfits_array_data = np.concatenate(
            [data_array.real, data_array.imag, weights_array], axis=6)

        # FITS uvw direction convention is opposite ours and Miriad's.
        # So conjugate the visibilities and flip the uvws:
        uvw_array_sec = -1 * self.uvw_array / const.c.to('m/s').value
        # jd_midnight = np.floor(self.time_array[0] - 0.5) + 0.5
        tzero = np.float32(self.time_array[0])

        # uvfits convention is that time_array + relevant PZERO = actual JD
        # We are setting PZERO4 = float32(first time of observation)
        time_array = np.float32(self.time_array - np.float64(tzero))

        int_time_array = self.integration_time

        baselines_use = self.antnums_to_baseline(self.ant_1_array,
                                                 self.ant_2_array,
                                                 attempt256=True)
        # Set up dictionaries for populating hdu
        # Note that uvfits antenna arrays are 1-indexed so we add 1
        # to our 0-indexed arrays
        group_parameter_dict = {
            'UU      ': uvw_array_sec[:, 0],
            'VV      ': uvw_array_sec[:, 1],
            'WW      ': uvw_array_sec[:, 2],
            'DATE    ': time_array,
            'BASELINE': baselines_use,
            'ANTENNA1': self.ant_1_array + 1,
            'ANTENNA2': self.ant_2_array + 1,
            'SUBARRAY': np.ones_like(self.ant_1_array),
            'INTTIM  ': int_time_array
        }

        pscal_dict = {
            'UU      ': 1.0,
            'VV      ': 1.0,
            'WW      ': 1.0,
            'DATE    ': 1.0,
            'BASELINE': 1.0,
            'ANTENNA1': 1.0,
            'ANTENNA2': 1.0,
            'SUBARRAY': 1.0,
            'INTTIM  ': 1.0
        }
        pzero_dict = {
            'UU      ': 0.0,
            'VV      ': 0.0,
            'WW      ': 0.0,
            'DATE    ': tzero,
            'BASELINE': 0.0,
            'ANTENNA1': 0.0,
            'ANTENNA2': 0.0,
            'SUBARRAY': 0.0,
            'INTTIM  ': 0.0
        }

        if write_lst:
            # lst is a non-standard entry (it's not in the AIPS memo)
            # but storing it can be useful (e.g. can avoid recalculating it on read)
            # need to store it in 2 parts to get enough accuracy
            # angles in uvfits files are stored in degrees, so first convert to degrees
            lst_array_deg = np.rad2deg(self.lst_array)
            lst_array_1 = np.float32(lst_array_deg)
            lst_array_2 = np.float32(lst_array_deg - np.float64(lst_array_1))
            group_parameter_dict['LST     '] = lst_array_1
            pscal_dict['LST     '] = 1.0
            pzero_dict['LST     '] = 0.0

        # list contains arrays of [u,v,w,date,baseline];
        # each array has shape (Nblts)
        parnames_use = ['UU      ', 'VV      ', 'WW      ', 'DATE    ']
        if (np.max(self.ant_1_array) < 255 and np.max(self.ant_2_array) < 255):
            # if the number of antennas is less than 256 then include both the
            # baseline array and the antenna arrays in the group parameters.
            # Otherwise just use the antenna arrays
            parnames_use.append('BASELINE')

        parnames_use += ['ANTENNA1', 'ANTENNA2', 'SUBARRAY', 'INTTIM  ']

        if write_lst:
            parnames_use.append('LST     ')

        group_parameter_list = [
            group_parameter_dict[parname] for parname in parnames_use
        ]

        if write_lst:
            # add second LST array part
            parnames_use.append('LST     ')
            group_parameter_list.append(lst_array_2)

        hdu = fits.GroupData(uvfits_array_data,
                             parnames=parnames_use,
                             pardata=group_parameter_list,
                             bitpix=-32)
        hdu = fits.GroupsHDU(hdu)

        for i, key in enumerate(parnames_use):
            hdu.header['PSCAL' + str(i + 1) + '  '] = pscal_dict[key]
            hdu.header['PZERO' + str(i + 1) + '  '] = pzero_dict[key]

        # ISO string of first time in self.time_array
        hdu.header['DATE-OBS'] = Time(self.time_array[0],
                                      scale='utc',
                                      format='jd').isot

        hdu.header['CTYPE2  '] = 'COMPLEX '
        hdu.header['CRVAL2  '] = 1.0
        hdu.header['CRPIX2  '] = 1.0
        hdu.header['CDELT2  '] = 1.0

        # Note: This axis is called STOKES to comply with the AIPS memo 117
        # However, this is confusing because it is NOT a true Stokes axis,
        #   it is really the polarization axis.
        hdu.header['CTYPE3  '] = 'STOKES  '
        hdu.header['CRVAL3  '] = self.polarization_array[0]
        hdu.header['CRPIX3  '] = 1.0
        hdu.header['CDELT3  '] = pol_spacing

        hdu.header['CTYPE4  '] = 'FREQ    '
        hdu.header['CRVAL4  '] = self.freq_array[0, 0]
        hdu.header['CRPIX4  '] = 1.0
        hdu.header['CDELT4  '] = freq_spacing

        hdu.header['CTYPE5  '] = 'IF      '
        hdu.header['CRVAL5  '] = 1.0
        hdu.header['CRPIX5  '] = 1.0
        hdu.header['CDELT5  '] = 1.0

        hdu.header['CTYPE6  '] = 'RA'
        hdu.header['CRVAL6  '] = self.phase_center_ra_degrees

        hdu.header['CTYPE7  '] = 'DEC'
        hdu.header['CRVAL7  '] = self.phase_center_dec_degrees

        hdu.header['BUNIT   '] = self.vis_units
        hdu.header['BSCALE  '] = 1.0
        hdu.header['BZERO   '] = 0.0

        hdu.header['OBJECT  '] = self.object_name
        hdu.header['TELESCOP'] = self.telescope_name
        hdu.header['LAT     '] = self.telescope_location_lat_lon_alt_degrees[0]
        hdu.header['LON     '] = self.telescope_location_lat_lon_alt_degrees[1]
        hdu.header['ALT     '] = self.telescope_location_lat_lon_alt[2]
        hdu.header['INSTRUME'] = self.instrument
        hdu.header['EPOCH   '] = float(self.phase_center_epoch)
        if self.phase_center_frame is not None:
            hdu.header['PHSFRAME'] = self.phase_center_frame

        if self.x_orientation is not None:
            hdu.header['XORIENT'] = self.x_orientation

        if self.blt_order is not None:
            blt_order_str = ', '.join(self.blt_order)
            hdu.header['BLTORDER'] = blt_order_str

        for line in self.history.splitlines():
            hdu.header.add_history(line)

        # end standard keywords; begin user-defined keywords
        for key, value in self.extra_keywords.items():
            # header keywords have to be 8 characters or less
            if len(str(key)) > 8:
                warnings.warn(
                    'key {key} in extra_keywords is longer than 8 '
                    'characters. It will be truncated to 8 as required '
                    'by the uvfits file format.'.format(key=key))
            keyword = key[:8].upper()
            if isinstance(value, (dict, list, np.ndarray)):
                raise TypeError('Extra keyword {keyword} is of {keytype}. '
                                'Only strings and numbers are '
                                'supported in uvfits.'.format(
                                    keyword=key, keytype=type(value)))

            if keyword == 'COMMENT':
                for line in value.splitlines():
                    hdu.header.add_comment(line)
            else:
                hdu.header[keyword] = value

        # ADD the ANTENNA table
        staxof = np.zeros(self.Nants_telescope)

        # 0 specifies alt-az, 6 would specify a phased array
        mntsta = np.zeros(self.Nants_telescope)

        # beware, X can mean just about anything
        poltya = np.full((self.Nants_telescope), 'X', dtype=np.object_)
        polaa = [90.0] + np.zeros(self.Nants_telescope)  # broadcasts to 90.0 for all antennas
        poltyb = np.full((self.Nants_telescope), 'Y', dtype=np.object_)
        polab = [0.0] + np.zeros(self.Nants_telescope)  # broadcasts to 0.0 for all antennas

        col1 = fits.Column(name='ANNAME',
                           format='8A',
                           array=self.antenna_names)
        # AIPS memo #117 says that antenna_positions should be relative to
        # the array center, but in a rotated ECEF frame so that the x-axis
        # goes through the local meridian.
        longitude = self.telescope_location_lat_lon_alt[1]
        rot_ecef_positions = uvutils.rotECEF_from_ECEF(self.antenna_positions,
                                                       longitude)
        col2 = fits.Column(name='STABXYZ',
                           format='3D',
                           array=rot_ecef_positions)
        # convert to 1-indexed from 0-indexed indices
        col3 = fits.Column(name='NOSTA',
                           format='1J',
                           array=self.antenna_numbers + 1)
        col4 = fits.Column(name='MNTSTA', format='1J', array=mntsta)
        col5 = fits.Column(name='STAXOF', format='1E', array=staxof)
        col6 = fits.Column(name='POLTYA', format='1A', array=poltya)
        col7 = fits.Column(name='POLAA', format='1E', array=polaa)
        # col8 = fits.Column(name='POLCALA', format='3E', array=polcala)
        col9 = fits.Column(name='POLTYB', format='1A', array=poltyb)
        col10 = fits.Column(name='POLAB', format='1E', array=polab)
        # col11 = fits.Column(name='POLCALB', format='3E', array=polcalb)
        # note ORBPARM is technically required, but we didn't put it in
        col_list = [col1, col2, col3, col4, col5, col6, col7, col9, col10]

        if self.antenna_diameters is not None:
            col12 = fits.Column(name='DIAMETER',
                                format='1E',
                                array=self.antenna_diameters)
            col_list.append(col12)

        cols = fits.ColDefs(col_list)

        ant_hdu = fits.BinTableHDU.from_columns(cols)

        ant_hdu.header['EXTNAME'] = 'AIPS AN'
        ant_hdu.header['EXTVER'] = 1

        # write XYZ coordinates if not already defined
        ant_hdu.header['ARRAYX'] = self.telescope_location[0]
        ant_hdu.header['ARRAYY'] = self.telescope_location[1]
        ant_hdu.header['ARRAYZ'] = self.telescope_location[2]
        ant_hdu.header['FRAME'] = 'ITRF'
        ant_hdu.header['GSTIA0'] = self.gst0
        ant_hdu.header['FREQ'] = self.freq_array[0, 0]
        ant_hdu.header['RDATE'] = self.rdate
        ant_hdu.header['UT1UTC'] = self.dut1

        ant_hdu.header['TIMSYS'] = self.timesys
        if self.timesys != 'UTC':
            raise ValueError(
                'This file has a time system {tsys}. '
                'Only "UTC" time system files are supported'.format(
                    tsys=self.timesys))
        ant_hdu.header['ARRNAM'] = self.telescope_name
        ant_hdu.header['NO_IF'] = self.Nspws
        ant_hdu.header['DEGPDY'] = self.earth_omega
        # ant_hdu.header['IATUTC'] = 35.

        # set mandatory parameters which are not supported by this object
        # (or that we just don't understand)
        ant_hdu.header['NUMORB'] = 0

        # note: Bart had this set to 3. We've set it to 0 after AIPS 117. -jph
        ant_hdu.header['NOPCAL'] = 0

        ant_hdu.header['POLTYPE'] = 'X-Y LIN'

        # note: we do not support the concept of "frequency setups"
        # -- lists of spws given in a SU table.
        ant_hdu.header['FREQID'] = -1

        # if there are offsets in images, this could be the culprit
        ant_hdu.header['POLARX'] = 0.0
        ant_hdu.header['POLARY'] = 0.0

        ant_hdu.header['DATUTC'] = 0  # ONLY UTC SUPPORTED

        # we always output right handed coordinates
        ant_hdu.header['XYZHAND'] = 'RIGHT'

        # ADD the FQ table
        # skipping for now and limiting to a single spw

        # write the file
        hdulist = fits.HDUList(hdus=[hdu, ant_hdu])
        hdulist.writeto(filename, overwrite=True)
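
# A hedged usage sketch in the pyuvdata style this method appears to come
# from (file names are placeholders):
from pyuvdata import UVData

uv = UVData()
uv.read('drift_scan.uvh5')  # any format pyuvdata can read
uv.write_uvfits('drift_scan.uvfits',
                force_phase=True,         # phase drift data to first-time zenith
                spoof_nonessential=True)  # fill uvfits-only required keywords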
Beispiel #26
    indx = eLIER & np.invert((log[1].data['PLATE-IFU'] == '8146-3702')
                             | (log[1].data['PLATE-IFU'] == '8158-3703'))

    mass = log[1].data['MASS_ELL_PETRO'][indx][good_data]

    center_mass = np.median(mass)
    low_mass = mass <= center_mass
    high_mass = mass > center_mass

    aligned_low = deltaPA[low_mass] < alignthreshold
    misaligned_low = np.invert(aligned_low)

    aligned_high = deltaPA[high_mass] < alignthreshold
    misaligned_high = np.invert(aligned_high)
    hdul = fits.HDUList()
    if stack_number == 2:
        hdul.append(fits.PrimaryHDU(wave))
    # =============================================================================
    #
    # =============================================================================
    if stack_number == 1:
        plate = log[1].data['PLATE'][indx][good_data][low_mass][aligned_low]
        ifu = log[1].data['IFUDESIGN'][indx][good_data][low_mass][aligned_low]

    if stack_number == 2:
        plate = log[1].data['PLATE'][indx][good_data][low_mass][misaligned_low]
        ifu = log[1].data['IFUDESIGN'][indx][good_data][low_mass][
            misaligned_low]

    if stack_number == 3:
Beispiel #27
def condense_pdf1d_files(bname, cur_dir, out_dir, n_sources):

    # useful default for the "failure" case of all negative values and log spacing requested
    # n_bins_default = 50

    # get all the files
    pdf1d_files = sorted(glob.glob(cur_dir + "*_pdf1d.fits"))

    # loop over the pdf1d files, accumulating the 1d pdfs and bins
    with fits.open(pdf1d_files[0]) as hdu:
        cond_pdf1d_name = [
            hdu[i].header["EXTNAME"] for i in range(1, len(hdu))
        ]
    n_qnames = len(cond_pdf1d_name)
    pdf1d_vals = [[] for i in range(n_qnames)]
    pdf1d_bins = [[] for i in range(n_qnames)]

    for i, cur_pdf1d in enumerate(pdf1d_files):

        with fits.open(cur_pdf1d) as hdulist:

            for k in range(n_qnames):
                pdf1d_histo = hdulist[k + 1].data
                n_cur_source, n_bins = pdf1d_histo.shape
                n_cur_source -= 1

                # copy the 1D PDFs
                pdf1d_vals[k].append(pdf1d_histo[0:n_cur_source, :])
                # copy the bin values
                pdf1d_bins[k].append(pdf1d_histo[-1, :])

    # condense the info into arrays with dimensions [n_stars, max(n_bins), 2]

    tot_stars = np.sum(np.array([i.shape[0] for i in pdf1d_vals[0]]))

    hdulist = fits.HDUList([fits.PrimaryHDU()])

    for k, qname in enumerate(cond_pdf1d_name):

        max_bin_length = np.max(
            [len(pdf1d_bins[k][i]) for i in range(len(pdf1d_bins[k]))])

        # initialize array with NaNs
        cond_data = np.full((tot_stars, max_bin_length, 2), np.nan)
        # fill in the array
        curr_star = 0
        for i in range(len(pdf1d_bins[k])):
            n_bin = len(pdf1d_bins[k][i])
            n_star = pdf1d_vals[k][i].shape[0]
            cond_data[curr_star:curr_star + n_star, 0:n_bin,
                      0] = pdf1d_vals[k][i]
            cond_data[curr_star:curr_star + n_star, 0:n_bin,
                      1] = pdf1d_bins[k][i]
            curr_star += n_star

        # an ImageHDU gets XTENSION = 'IMAGE' automatically
        chdu = fits.ImageHDU(cond_data)
        chdu.header.set("EXTNAME", qname)

        hdulist.append(chdu)

    # write the 1D PDFs
    hdulist.writeto(out_dir + "/" + bname + "_pdf1d.fits", overwrite=True)
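
# Reading one star's 1D PDF back out of the condensed file could look like
# this (the file path and the 'Av' extension name are illustrative):
import numpy as np
from astropy.io import fits

with fits.open('beast_run_pdf1d.fits') as hdul:
    cond = hdul['Av'].data          # shape (n_stars, max_n_bins, 2)
    star0_vals = cond[0, :, 0]      # PDF values for the first star
    star0_bins = cond[0, :, 1]      # matching bin values
    good = np.isfinite(star0_bins)  # NaNs pad quantities with fewer bins
    print(star0_bins[good], star0_vals[good])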
Beispiel #28
    def to_hdulist(self,
                   hdu=None,
                   hdu_bands=None,
                   sparse=False,
                   format="gadf"):
        """Convert to `~astropy.io.fits.HDUList`.

        Parameters
        ----------
        hdu : str
            Name or index of the HDU with the map data.
        hdu_bands : str
            Name or index of the HDU with the BANDS table.
        sparse : bool
            Sparsify the map by only writing pixels with non-zero
            amplitude.
        format : {'gadf', 'fgst-ccube','fgst-template'}
            FITS format convention.

        Returns
        -------
        hdu_list : `~astropy.io.fits.HDUList`
            Map exported as a list of FITS HDUs.
        """
        if sparse:
            hdu = "SKYMAP" if hdu is None else hdu.upper()
        else:
            hdu = "PRIMARY" if hdu is None else hdu.upper()

        if sparse and hdu == "PRIMARY":
            raise ValueError(
                "Sparse maps cannot be written to the PRIMARY HDU.")

        if format in ["fgst-ccube", "fgst-template"]:
            if self.geom.axes[0].name != "energy" or len(self.geom.axes) > 1:
                raise ValueError(
                    "The 'fgst' formats support only a single energy axis; "
                    "no extra axes are allowed.")

        if self.geom.axes:
            hdu_bands_out = self.geom.to_bands_hdu(hdu=hdu_bands,
                                                   hdu_skymap=hdu,
                                                   format=format)
            hdu_bands = hdu_bands_out.name
        else:
            hdu_bands = None

        hdu_out = self.to_hdu(hdu=hdu, hdu_bands=hdu_bands, sparse=sparse)

        hdu_out.header["META"] = json.dumps(self.meta)

        hdu_out.header["BUNIT"] = self.unit.to_string("fits")

        if hdu == "PRIMARY":
            hdulist = [hdu_out]
        else:
            hdulist = [fits.PrimaryHDU(), hdu_out]

        if self.geom.axes:
            hdulist += [hdu_bands_out]

        return fits.HDUList(hdulist)
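
# Typical use, assuming gammapy is installed (its Map classes provide this
# method); the map construction here is illustrative:
from gammapy.maps import Map

m = Map.create(npix=10, binsz=0.1)
m.to_hdulist(hdu='SKYMAP', sparse=False, format='gadf').writeto(
    'skymap.fits', overwrite=True)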
Beispiel #29
    c = sigma * sqrt(-2. * log(x)) * cos(2. * pi * y)
    if '-s' in sys.argv and str(data1[j][i]) == 'nan':
      data1[j][i] = sky + c
    elif str(data2[j][i]) == 'nan':
      data1[j][i] = data1[j][i] + c

if '-b' in sys.argv:
  for j in range(2, ny - 2, 1):
    for i in range(2, nx - 2, 1):
      if str(data2[j][i]) == 'nan':
        avg = data1[j-1][i-1] + data1[j][i-1] + data1[j+1][i-1]
        avg = avg + data1[j-1][i] + data1[j][i] + data1[j+1][i]
        avg = avg + data1[j-1][i+1] + data1[j][i+1] + data1[j+1][i+1]
        data1[j][i] = avg / 9.

if '-s' not in sys.argv and '-c' not in sys.argv:
  file = pyfits.open(filename)
  data2 = file[0].data
  data3 = data2 - data1

if os.path.isfile(filename.split('.')[0] + '.stars'):
  os.remove(filename.split('.')[0] + '.stars')
fitsobj = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
hdu.header = file[0].header
if '-s' in sys.argv or '-c' in sys.argv:
  hdu.data = data1
else:
  hdu.data = data3
fitsobj.append(hdu)
fitsobj.writeto(filename.split('.')[0] + '.stars')
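
# The fragment above draws Gaussian noise via the Box-Muller transform
# (c = sigma * sqrt(-2 ln x) * cos(2 pi y)) to repair masked pixels; with
# modern numpy the same replacement can be sketched directly:
import numpy as np

def fill_nans_with_sky(data, sky, sigma, seed=0):
    """Replace NaN pixels with the sky level plus Gaussian noise."""
    rng = np.random.default_rng(seed)
    out = np.array(data, dtype=float)
    bad = np.isnan(out)
    out[bad] = sky + rng.normal(0.0, sigma, size=bad.sum())
    return out

print(fill_nans_with_sky([[1.0, np.nan], [np.nan, 4.0]], sky=2.0, sigma=0.5))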
Beispiel #30
def nirspec_cube_pars(tmpdir_factory):
    """ Set up the nirspec cube pars reference file  """

    filename = tmpdir_factory.mktemp('cube_pars')
    filename = str(filename.join('nirspec_cube_pars.fits'))
    hdu0 = fits.PrimaryHDU()
    hdu0.header['REFTYPE'] = 'CUBEPAR'
    hdu0.header['INSTRUME'] = 'NIRSPEC'
    hdu0.header['MODELNAM'] = 'FM'
    hdu0.header['DETECTOR'] = 'N/A'
    hdu0.header['EXP_TYPE'] = 'NRS_IFU'
    hdu0.header['BAND'] = 'N/A'
    hdu0.header['CHANNEL'] = 'N/A'

    # make the first extension
    disp = np.array([
        'PRISM', 'G140M', 'G140M', 'G140H', 'G140H', 'G235M', 'G235H', 'G395M',
        'G395H'
    ])
    filt = np.array([
        'CLEAR', 'F070LP', 'F100LP', 'F070LP', 'F100LP', 'F170LP', 'F170LP',
        'F290LP', 'F290LP'
    ])
    spsize = np.array([0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10])
    wsamp = np.array(
        [0.005, 0.001, 0.0006, 0.0002, 0.0002, 0.001, 0.0004, 0.0017, 0.0007])

    wmin = np.array([0.6, 0.7, 0.97, 0.7, 0.97, 1.66, 1.66, 2.87, 2.87])
    wmax = np.array([5.3, 1.27, 1.89, 1.27, 1.89, 3.17, 3.17, 5.27, 5.27])

    col1 = fits.Column(name='DISPERSER', format='5A', array=disp)
    col2 = fits.Column(name='FILTER', format='6A', array=filt)
    col3 = fits.Column(name='WAVEMIN', format='E', array=wmin, unit='micron')
    col4 = fits.Column(name='WAVEMAX', format='E', array=wmax, unit='micron')
    col5 = fits.Column(name='SPAXELSIZE',
                       format='E',
                       array=spsize,
                       unit='arcsec')
    col6 = fits.Column(name='SPECTRALSTEP',
                       format='D',
                       array=wsamp,
                       unit='micron')

    hdu1 = fits.BinTableHDU.from_columns([col1, col2, col3, col4, col5, col6])
    hdu1.header['EXTNAME'] = 'CUBEPAR'

    # make the second extension
    disp = np.array([
        'PRISM', 'G140M', 'G140M', 'G140H', 'G140H', 'G235M', 'G235H', 'G395M',
        'G395H'
    ])
    filt = np.array([
        'CLEAR', 'F070LP', 'F100LP', 'F070LP', 'F100LP', 'F170LP', 'F170LP',
        'F290LP', 'F290LP'
    ])

    roispat = np.array(
        [0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209])
    roispec = np.array(
        [0.011, 0.0012, 0.0012, 0.0004, 0.0004, 0.002, 0.0008, 0.003, 0.0012])

    power = np.array([2, 2, 2, 2, 2, 2, 2, 2, 2])
    softrad = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])

    col1 = fits.Column(name='DISPERSER', format='5A', array=disp)
    col2 = fits.Column(name='FILTER', format='6A', array=filt)
    col3 = fits.Column(name='ROISPATIAL',
                       format='E',
                       array=roispat,
                       unit='arcsec')
    col4 = fits.Column(name='ROISPECTRAL',
                       format='E',
                       array=roispec,
                       unit='micron')
    col5 = fits.Column(name='POWER', format='I', array=power)
    col6 = fits.Column(name='SOFTRAD',
                       format='E',
                       array=softrad,
                       unit='arcsec')

    hdu2 = fits.BinTableHDU.from_columns([col1, col2, col3, col4, col5, col6])
    hdu2.header['EXTNAME'] = 'CUBEPAR_MSM'

    # make the third extension
    # Define the multiextension wavelength solution
    finalwave = np.arange(0.6, 5.3, 0.1)
    nelem = len(finalwave)

    # Spatial roi held constant with wavelength
    roispat = np.ones(nelem) * 0.2
    # Spectral roi held constant with wavelength
    roispec = np.ones(nelem) * 0.01
    # Power is 2 at all wavelengths
    power = np.ones(nelem, dtype=int) * 2
    # Softening radius is 0.01 at all wavelengths
    softrad = np.ones(nelem) * 0.01

    col1 = fits.Column(name='WAVELENGTH',
                       format='D',
                       array=finalwave,
                       unit='micron')
    col2 = fits.Column(name='ROISPATIAL',
                       format='E',
                       array=roispat,
                       unit='arcsec')
    col3 = fits.Column(name='ROISPECTRAL',
                       format='E',
                       array=roispec,
                       unit='micron')
    col4 = fits.Column(name='POWER', format='I', array=power)
    col5 = fits.Column(name='SOFTRAD',
                       format='E',
                       array=softrad,
                       unit='arcsec')

    hdu3 = fits.BinTableHDU.from_columns([col1, col2, col3, col4, col5])
    hdu3.header['EXTNAME'] = 'MULTICHAN_PRISM_MSM'

    # make the 4th extension
    # Define the multiextension wavelength solution
    finalwave = np.arange(0.7, 7.7, 0.1)
    nelem = len(finalwave)
    # Spatial roi held constant with wavelength
    roispat = np.ones(nelem) * 0.2
    # Spectral roi held constant with wavelength
    roispec = np.ones(nelem) * 0.01
    # Power is 2 at all wavelengths
    power = np.ones(nelem, dtype=int) * 2
    # Softening radius is 0.01 at all wavelengths
    softrad = np.ones(nelem) * 0.01

    col1 = fits.Column(name='WAVELENGTH',
                       format='D',
                       array=finalwave,
                       unit='micron')
    col2 = fits.Column(name='ROISPATIAL',
                       format='E',
                       array=roispat,
                       unit='arcsec')
    col3 = fits.Column(name='ROISPECTRAL',
                       format='E',
                       array=roispec,
                       unit='micron')
    col4 = fits.Column(name='POWER', format='I', array=power)
    col5 = fits.Column(name='SOFTRAD',
                       format='E',
                       array=softrad,
                       unit='arcsec')

    hdu4 = fits.BinTableHDU.from_columns([col1, col2, col3, col4, col5])
    hdu4.header['EXTNAME'] = 'MULTICHAN_MED_MSM'

    hdu = fits.HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])
    hdu.writeto(filename, overwrite=True)
    return filename
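
# Used as a pytest fixture (the function above presumably carries a
# @pytest.fixture decorator upstream), a test can request it by name and
# open the resulting reference file; the assertions are illustrative:
from astropy.io import fits

def test_nirspec_cube_pars(nirspec_cube_pars):
    with fits.open(nirspec_cube_pars) as hdul:
        assert hdul[0].header['REFTYPE'] == 'CUBEPAR'
        assert hdul['CUBEPAR'].data['DISPERSER'][0] == 'PRISM'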