Example #1
                currxi[k, 0]), currxi[k, 2], currxi[k, 0]
        index += 1

col1_p = fits.Column(name='BIN1', format='K', array=XI_PLUS[:, 0])
col2_p = fits.Column(name='BIN2', format='K', array=XI_PLUS[:, 1])
col3_p = fits.Column(name='ANGBIN', format='K', array=XI_PLUS[:, 2])
col4_p = fits.Column(name='VALUE', format='D', array=XI_PLUS[:, 3])
col5_p = fits.Column(name='ANG', format='D', array=XI_PLUS[:, 4])

col1_m = fits.Column(name='BIN1', format='K', array=XI_MINUS[:, 0])
col2_m = fits.Column(name='BIN2', format='K', array=XI_MINUS[:, 1])
col3_m = fits.Column(name='ANGBIN', format='K', array=XI_MINUS[:, 2])
col4_m = fits.Column(name='VALUE', format='D', array=XI_MINUS[:, 3])
col5_m = fits.Column(name='ANG', format='D', array=XI_MINUS[:, 4])

cols_p = fits.ColDefs([col1_p, col2_p, col3_p, col4_p, col5_p])
cols_m = fits.ColDefs([col1_m, col2_m, col3_m, col4_m, col5_m])

hdu_xi_plus = fits.BinTableHDU.from_columns(cols_p)
hdu_xi_minus = fits.BinTableHDU.from_columns(cols_m)

hdu_xi_plus.header['2PTDATA'] = True
hdu_xi_plus.header['EXTNAME'] = 'xi_plus'
hdu_xi_plus.header['QUANT1'] = 'G+R'
hdu_xi_plus.header['QUANT2'] = 'G+R'
hdu_xi_plus.header['KERNEL_1'] = 'NZ_SAMPLE'
hdu_xi_plus.header['KERNEL_2'] = 'NZ_SAMPLE'
hdu_xi_plus.header['WINDOWS'] = 'SAMPLE'
hdu_xi_plus.header['N_ZBIN1'] = 4
hdu_xi_plus.header['N_ZBIN2'] = 4
hdu_xi_plus.header['N_ANG'] = 9
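
A hedged sketch of how the two HDUs built above might be written out and verified (this assumes `fits` is `astropy.io.fits`; the output filename is illustrative):

primary = fits.PrimaryHDU()
hdulist = fits.HDUList([primary, hdu_xi_plus, hdu_xi_minus])
hdulist.writeto('twopoint_data.fits', overwrite=True)  # illustrative filename

# Read back and confirm the extension can be found by its EXTNAME.
with fits.open('twopoint_data.fits') as f:
    assert f['xi_plus'].header['N_ANG'] == 9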
Example #2
c_sn = np.array(c_sn, dtype='float')
c_area = np.array(c_area, dtype='float')
c_size = np.array(c_size, dtype='float')
c_id2 = np.array(c_id2)
c_ngal2 = np.array(c_ngal2, dtype='float')
g_id = np.array(g_id)
g_ra = np.array(g_ra, dtype='float')
g_dec = np.array(g_dec, dtype='float')
g_z = np.array(g_z, dtype='float')
from astropy.io import fits
# fits.new_table() was removed from astropy; BinTableHDU.from_columns is
# the current equivalent.
tbhdu1 = fits.BinTableHDU.from_columns(
    fits.ColDefs([
        fits.Column(name='c_id', format='8A', array=c_id),
        fits.Column(name='c_ra', format='D', array=c_ra),
        fits.Column(name='c_dec', format='D', array=c_dec),
        fits.Column(name='c_z', format='D', array=c_z),
        fits.Column(name='c_ngal', format='D', array=c_ngal),
        fits.Column(name='c_sn', format='D', array=c_sn),
        fits.Column(name='c_area', format='D', array=c_area),
        fits.Column(name='c_size', format='D', array=c_size)
    ]))
tbhdu2 = fits.BinTableHDU.from_columns(
    fits.ColDefs([
        fits.Column(name='c_id', format='8A', array=c_id2),
        fits.Column(name='c_ngal', format='D', array=c_ngal2),
        fits.Column(name='g_id', format='8A', array=g_id),
        fits.Column(name='g_ra', format='D', array=g_ra),
        fits.Column(name='g_dec', format='D', array=g_dec),
        fits.Column(name='g_z', format='D', array=g_z)
    ]))
n = np.arange(100.0)
hdu = fits.PrimaryHDU(n)
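
A hedged sketch of the write step this snippet stops short of (filename illustrative):

hdulist = fits.HDUList([hdu, tbhdu1, tbhdu2])
hdulist.writeto('clusters_and_galaxies.fits', overwrite=True)  # illustrative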
Example #3
        temp_evt_file, outfile)

    runner.run(cmd_line)

    # Add the GTI extension
    start_column = pyfits.Column(name='START',
                                 format='D',
                                 unit='s',
                                 array=[tstart])
    stop_column = pyfits.Column(name='STOP',
                                format='D',
                                unit='s',
                                array=[tstop])

    gti_ext = pyfits.BinTableHDU.from_columns(
        pyfits.ColDefs([start_column, stop_column]))
    gti_ext.name = "GTI"

    keywords = {
        'TSTART': tstart,
        'TSTOP': tstop,
        'MISSION': 'AXAF    ',
        'TELESCOP': 'CHANDRA ',
        'INSTRUME': 'ACIS    ',
        'TIMESYS': 'TT      ',
        'TIMEUNIT': 's       '
    }

    for key, value in keywords.items():
        gti_ext.header[key] = value
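
The snippet ends before the GTI extension is saved; a hedged sketch of one way to attach it, assuming `pyfits` is `astropy.io.fits` and `outfile` is the event file produced by the command above:

# fits.append adds an HDU to the end of an existing file on disk.
pyfits.append(outfile, gti_ext.data, header=gti_ext.header)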
Example #4
def generate_fake_fits_observation(event_list=None,
                                   filename=None,
                                   instr='FPMA',
                                   gti=None,
                                   tstart=None,
                                   tstop=None,
                                   mission='NUSTAR',
                                   mjdref=55197.00076601852,
                                   livetime=None,
                                   additional_columns={}):
    """Generate fake NuSTAR data.

    Takes an event list and builds a fake NuSTAR event file from it.
    All inputs are None by default, and can be set during the call.

    Parameters
    ----------
    event_list : list-like
        :class:`stingray.events.EventList` object. If left None, 1000
        random events will be generated, for a total length of 1025 s or the
        difference between tstop and tstart.
    filename : str
        Output file name

    Returns
    -------
    hdulist : FITS hdu list
        FITS hdu list of the output file

    Other Parameters
    ----------------
    mjdref : float
        Reference MJD. Default is 55197.00076601852 (NuSTAR)
    pi : list-like
        The PI channel of each event
    tstart : float
        Start of the observation (s from mjdref)
    tstop : float
        End of the observation (s from mjdref)
    instr : str
        Name of the instrument. Default is 'FPMA'
    livetime : float
        Total livetime. Default is tstop - tstart
    """
    from astropy.io import fits
    import numpy.random as ra

    if event_list is None:
        tstart = assign_value_if_none(tstart, 8e+7)
        tstop = assign_value_if_none(tstop, tstart + 1025)
        ev_list = sorted(ra.uniform(tstart, tstop, 1000))
    else:
        ev_list = event_list.time

    if hasattr(event_list, 'pi'):
        pi = event_list.pi
    else:
        pi = ra.randint(0, 1024, len(ev_list))

    tstart = assign_value_if_none(tstart, np.floor(ev_list[0]))
    tstop = assign_value_if_none(tstop, np.ceil(ev_list[-1]))
    gti = assign_value_if_none(gti, np.array([[tstart, tstop]]))
    filename = assign_value_if_none(filename, 'events.evt')
    livetime = assign_value_if_none(livetime, tstop - tstart)

    if livetime > tstop - tstart:
        raise ValueError('Livetime must be equal or smaller than '
                         'tstop - tstart')

    # Create primary header
    prihdr = fits.Header()
    prihdr['OBSERVER'] = 'Edwige Bubble'
    prihdr['TELESCOP'] = (mission, 'Telescope (mission) name')
    prihdr['INSTRUME'] = (instr, 'Instrument name')
    prihdu = fits.PrimaryHDU(header=prihdr)

    # Write events to table
    col1 = fits.Column(name='TIME', format='1D', array=ev_list)
    col2 = fits.Column(name='PI', format='1J', array=pi)

    allcols = [col1, col2]

    if mission.lower().strip() == 'xmm':
        ccdnr = np.zeros(len(ev_list)) + 1
        ccdnr[1] = 2  # Make it less trivial
        ccdnr[10] = 7
        allcols.append(fits.Column(name='CCDNR', format='1J', array=ccdnr))

    for c in additional_columns.keys():
        col = fits.Column(name=c,
                          array=additional_columns[c]["data"],
                          format=additional_columns[c]["format"])
        allcols.append(col)

    cols = fits.ColDefs(allcols)
    tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.name = 'EVENTS'

    # ---- Fake lots of information ----
    tbheader = tbhdu.header
    tbheader['OBSERVER'] = 'Edwige Bubble'
    tbheader['COMMENT'] = ("FITS (Flexible Image Transport System) format is"
                           " defined in 'Astronomy and Astrophysics', volume"
                           " 376, page 359; bibcode: 2001A&A...376..359H")
    tbheader['TELESCOP'] = (mission, 'Telescope (mission) name')
    tbheader['INSTRUME'] = (instr, 'Instrument name')
    tbheader['OBS_ID'] = ('00000000001', 'Observation ID')
    tbheader['TARG_ID'] = (0, 'Target ID')
    tbheader['OBJECT'] = ('Fake X-1', 'Name of observed object')
    tbheader['RA_OBJ'] = (0.0, '[deg] R.A. Object')
    tbheader['DEC_OBJ'] = (0.0, '[deg] Dec Object')
    tbheader['RA_NOM'] = (0.0,
                          'Right Ascension used for barycenter corrections')
    tbheader['DEC_NOM'] = (0.0, 'Declination used for barycenter corrections')
    tbheader['RA_PNT'] = (0.0, '[deg] RA pointing')
    tbheader['DEC_PNT'] = (0.0, '[deg] Dec pointing')
    tbheader['PA_PNT'] = (0.0, '[deg] Position angle (roll)')
    tbheader['EQUINOX'] = (2.000E+03, 'Equinox of celestial coord system')
    tbheader['RADECSYS'] = ('FK5', 'Coordinate Reference System')
    tbheader['TASSIGN'] = ('SATELLITE', 'Time assigned by onboard clock')
    tbheader['TIMESYS'] = ('TDB', 'All times in this file are TDB')
    tbheader['MJDREFI'] = (int(mjdref),
                           'TDB time reference; Modified Julian Day (int)')
    tbheader['MJDREFF'] = (mjdref - int(mjdref),
                           'TDB time reference; Modified Julian Day (frac)')
    tbheader['TIMEREF'] = ('SOLARSYSTEM',
                           'Times are pathlength-corrected to barycenter')
    tbheader['CLOCKAPP'] = (False, 'TRUE if timestamps corrected by gnd sware')
    tbheader['COMMENT'] = ("MJDREFI+MJDREFF = epoch of Jan 1, 2010, in TT "
                           "time system.")
    tbheader['TIMEUNIT'] = ('s', 'unit for time keywords')
    tbheader['TSTART'] = (tstart,
                          'Elapsed seconds since MJDREF at start of file')
    tbheader['TSTOP'] = (tstop, 'Elapsed seconds since MJDREF at end of file')
    tbheader['LIVETIME'] = (livetime, 'On-source time')
    tbheader['TIMEZERO'] = (0.000000E+00, 'Time Zero')
    tbheader['COMMENT'] = ("Generated with HENDRICS by {0}".format(
        os.getenv('USER')))

    # ---- END Fake lots of information ----

    # Fake GTIs

    start = gti[:, 0]
    stop = gti[:, 1]

    col1 = fits.Column(name='START', format='1D', array=start)
    col2 = fits.Column(name='STOP', format='1D', array=stop)
    allcols = [col1, col2]
    cols = fits.ColDefs(allcols)
    gtinames = ['GTI']
    if mission.lower().strip() == 'xmm':
        gtinames = ['STDGTI01', 'STDGTI02', 'STDGTI07']

    all_new_hdus = [prihdu, tbhdu]
    for name in gtinames:
        gtihdu = fits.BinTableHDU.from_columns(cols)
        gtihdu.name = name
        all_new_hdus.append(gtihdu)

    thdulist = fits.HDUList(all_new_hdus)

    thdulist.writeto(filename, overwrite=True)
    return thdulist
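
A hedged usage sketch (values are illustrative; the helper `assign_value_if_none` and the module-level `np`/`os` imports come from the surrounding package):

hdul = generate_fake_fits_observation(tstart=8e7, tstop=8e7 + 1025,
                                      filename='fake_events.evt')
print(hdul['EVENTS'].header['TELESCOP'])  # -> NUSTAR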
Example #5
def add_all_broadband_flux(hdu_index=12,
                           filter_index=11,
                           params_index=3,
                           bb_index=1,
                           iq_index=5,
                           aux_index=2,
                           dirname='SCI_IMAGES',
                           mod=10,
                           startz=0.0,
                           endz=12.0,
                           has_aux=True,
                           default=True):

    if not os.path.exists(dirname):
        os.makedirs(dirname)

    if has_aux:
        print("Note: Requires AUX HDU!!")

    if (not has_aux) and default:
        iq_index = iq_index - 1
        hdu_index = hdu_index - 1
        filter_index = filter_index - 1
        params_index = params_index - 1

    print(iq_index)

    nums = np.asarray([
        '0055', '0056', '0057', '0059', '0060', '0061', '0062', '0063', '0064',
        '0065'
    ])

    #nums=np.asarray(['0016','0017','0018','0019','0020'])
    bbfiles = np.sort(np.array(glob.glob('broadband*.fits')))  #[0:10]

    imind = hdu_index

    hdus = pyfits.open(bbfiles[0])

    cambase = ((hdus)[imind]).data
    print(cambase.shape)
    singleslice = np.zeros_like(cambase[0, :, :])
    #print singleslice.shape
    totalflux = np.zeros_like(cambase)
    fluxunit = hdus[imind].header.get('IMUNIT')
    print(fluxunit)
    pixscale = hdus[imind].header.get('CD1_1')
    partialflux = np.zeros_like(cambase)

    totalflux_dandanxu = np.zeros_like(cambase)
    block_dandanxu = np.zeros_like(cambase)
    totalflux_gfs = np.zeros_like(cambase)
    block_gfs = np.zeros_like(cambase)

    zweightslice = np.zeros_like(cambase)
    partialzweight = np.zeros_like(cambase)
    cambasezeros = np.zeros_like(cambase)

    totalsm = np.zeros_like(singleslice)
    partialsm = np.zeros_like(singleslice)
    totalsfr = np.zeros_like(singleslice)
    partialsfr = np.zeros_like(singleslice)
    totalstellarmetals = np.zeros_like(singleslice)
    partialstellarmetals = np.zeros_like(singleslice)
    smdata = np.zeros_like(singleslice)

    filter_hdu = hdus[filter_index]
    params_hdu = hdus[params_index]
    iq_hdu = hdus[iq_index]

    stellarmassunit = 'Msun'
    metalsunit = 'Msun'
    if has_aux == True:
        aux_hdu = hdus[aux_index]
        stellarmassunit = aux_hdu.header.get('MS_UNIT')[
            0:4]  #+'(kpc^2)', we're gonna multiply by the pixel area
        metalsunit = aux_hdu.header.get('MMS_UNIT')[0:4]  #+'(kpc^2)'

    Nfiles = (bbfiles.shape)[0]
    Nlambda = (iq_hdu.data.field('lambda')).shape[0]
    Nfilters = (filter_hdu.data.field('filter')).shape[0]

    z_array = np.ndarray(shape=(Nfiles))
    pix_array = np.ndarray(shape=(Nfiles))
    camD_array = np.ndarray(shape=(Nfiles))
    fov_array = np.ndarray(shape=(Nfiles))
    sm_array = np.ndarray(shape=(Nfiles))
    mets_array = np.ndarray(shape=(Nfiles))

    total_fov = params_hdu.header.get('FOV')  # in radians
    pixel_fov_arcsec = (total_fov * 3600.0 * 180.0 /
                        math.pi) / (1.0 * singleslice.shape[0])

    L_lambda = np.ndarray(shape=(Nfiles, Nlambda))
    L_lambda_unit = iq_hdu.header.get('TUNIT3')
    L_lambda_ns = np.ndarray(shape=(Nfiles, Nlambda))
    L_lambda_ns_unit = iq_hdu.header.get('TUNIT5')
    L_lambda_out = np.ndarray(shape=(Nfiles, Nlambda))
    L_lambda_out_unit = iq_hdu.header.get('TUNIT6')
    L_lambda_abs = np.ndarray(shape=(Nfiles, Nlambda))
    L_lambda_abs_unit = iq_hdu.header.get('TUNIT4')

    lambda_eff = np.ndarray(shape=(Nfiles, Nfilters))
    lambda_eff_unit = filter_hdu.header.get('TUNIT2')
    ewidth_lambda = np.ndarray(shape=(Nfiles, Nfilters))
    ewidth_lambda_unit = filter_hdu.header.get('TUNIT3')
    ewidth_nu = np.ndarray(shape=(Nfiles, Nfilters))
    ewidth_nu_unit = filter_hdu.header.get('TUNIT4')
    L_lambda_eff = np.ndarray(shape=(Nfiles, Nfilters))
    L_lambda_eff_unit = filter_hdu.header.get('TUNIT5')
    AB_mag = np.ndarray(shape=(Nfiles, Nfilters))
    L_lambda_eff_ns = np.ndarray(shape=(Nfiles, Nfilters))
    L_lambda_eff_ns_unit = filter_hdu.header.get('TUNIT7')
    AB_mag_ns = np.ndarray(shape=(Nfiles, Nfilters))
    intSB = np.ndarray(shape=(Nfiles, Nfilters))
    intSB_unit = fluxunit
    cumSB = np.ndarray(shape=(Nfiles, Nfilters))
    cumSB_unit = fluxunit

    filter_array = filter_hdu.data.field('filter')
    col = pyfits.Column(name='filter', format='A30', array=filter_array)
    filcols = pyfits.ColDefs([col])
    # new_table()/update_ext_name() were removed from pyfits/astropy;
    # use the modern equivalents.
    filname_hdu = pyfits.BinTableHDU.from_columns(filcols)
    filname_hdu.name = 'FilterNames'

    lam_array = iq_hdu.data.field('lambda')
    lamunit = iq_hdu.header.get('TUNIT1')
    col = pyfits.Column(name='lambda',
                        format='D',
                        unit=lamunit,
                        array=lam_array)
    lamcols = pyfits.ColDefs([col])
    lamhdu = pyfits.BinTableHDU.from_columns(lamcols)
    lamhdu.name = 'Lambda'
    #lamhdu = pyfits.BinTableHDU(lamtab)

    extdata = asciitable.read(
        '/Users/gsnyder/Documents/Projects/Sunrise/sps_models/lmcave_ext_noheader.dat',
        Reader=asciitable.NoHeader)
    extlaminv = extdata['col1']
    ext_AlamAV = extdata['col2']

    #print extlaminv
    #print ext_AlamAV
    extlam_microns = 1.0 / extlaminv

    #return

    #dust params
    #b = -0.5
    s = 0.35  #0.35 #0.35 #s = 1.35
    b = -0.5

    galscale_kpc = 20.0

    Nfilters = (filter_array.shape)[0]
    Npixels = (1.0 * singleslice.shape[0])**2
    i = 0
    count = 0
    slicecount = 0
    initz = 0.0
    #    for ns in nums:
    #        fn = 'broadband_'+ns+'.fits'
    for fn in bbfiles:
        count = count + 1
        hdulist = pyfits.open(fn)
        #why? -- oh, to skip the addition if the run is still proceeding
        if len(hdulist) < imind + 1:
            continue

        camdata = ((hdulist)[imind]).data
        filhdu = hdulist[filter_index]
        bbhdu = hdulist[bb_index]
        iqhdu = hdulist[iq_index]
        camparhdu = hdulist[params_index]
        camhdu = hdulist[imind]
        auxhdu = hdulist[aux_index]

        redshift = bbhdu.header.get('redshift')
        if count % mod == 1:
            initz = redshift

        if redshift < startz:
            continue
        #if redshift > endz:
        #    continue

        lambda_eff[i, :] = filhdu.data.field(
            'lambda_eff')  #; print filhdu.data.field('L_lambda_eff0').shape
        ewidth_lambda[i, :] = filhdu.data.field('ewidth_lambda')
        ewidth_nu[i, :] = filhdu.data.field('ewidth_nu')
        L_lambda_eff[i, :] = filhdu.data.field('L_lambda_eff0')
        AB_mag[i, :] = filhdu.data.field('AB_mag0')
        L_lambda_eff_ns[i, :] = filhdu.data.field('L_lambda_eff_nonscatter0')
        AB_mag_ns[i, :] = filhdu.data.field('AB_mag_nonscatter0')

        z_array[i] = redshift
        pix_array[i] = camhdu.header.get('CD1_1')
        camD_array[i] = camparhdu.header.get('cameradist')
        fov_array[i] = camparhdu.header.get('linear_fov')

        L_lambda[i, :] = iqhdu.data.field('L_lambda')
        L_lambda_ns[i, :] = iqhdu.data.field('L_lambda_nonscatter0')
        L_lambda_out[i, :] = iqhdu.data.field('L_lambda_out0')
        L_lambda_abs[i, :] = iqhdu.data.field('L_lambda_absorbed')

        nzindices = np.where(
            camdata[6, :, :] > 0.0
        )  #nonzero indices -- this is checking the H-band WFC3 I assume?
        numnz = 1.0 * (nzindices[0].shape)[0]
        nzfrac = numnz / Npixels
        nz_kpcsq = nzfrac * pix_array[i]**2

        print(fn, redshift)  #, Npixels**0.5, numnz, nz_kpcsq, nzfrac, np.min(zweightslice), np.max(zweightslice)
        totalflux = totalflux + camdata
        partialflux = partialflux + camdata

        #is this part uber slow??
        zweightslice = np.where(
            totalflux > 0.0,
            (((totalflux - camdata) * zweightslice + camdata * redshift) /
             totalflux), cambasezeros)
        partialzweight = np.where(
            partialflux > 0.0,
            (((partialflux - camdata) * partialzweight + camdata * redshift) /
             partialflux), cambasezeros)

        if has_aux == True:
            auxdata = auxhdu.data
            smdata = (auxdata)[4, :, :]
            smetalsdata = (auxdata)[5, :, :]  # I THINK
            sfrdata = (auxdata)[2, :, :]

            gasmass = (auxdata)[0, :, :]
            metalmass = (auxdata)[1, :, :]
            sfrimage = (auxdata)[2, :, :]
            gastemp = (auxdata)[3, :, :]
            restframe_lambda_eff = ((1.0e6) * lambda_eff[i, :] /
                                    (1.0 + redshift))
            restframe_invlambda_eff = 1.0 / restframe_lambda_eff

            Alambda_eff = np.interp(restframe_invlambda_eff.flatten(),
                                    extlaminv, ext_AlamAV)

            tau_f = np.zeros_like(gasmass)
            block_dandanxu = np.zeros_like(camdata)
            block_gfs = np.zeros_like(camdata)
            constantblock = np.zeros_like(gasmass)

            nonzero_gasmass_ind = np.where(
                gasmass > 0.0)[0]  #; print nonzero_gasmass_ind.shape
            finite_gasmass_ind = np.isfinite(gasmass)

            sigma_pixels = (galscale_kpc / pix_array[i]) / (1.0 + redshift)
            print('sigma in pixels    ', sigma_pixels)
            t = scipy.signal.gaussian(101, sigma_pixels)
            kernel = t.reshape(101, 1) * t.reshape(1, 101)
            kernel /= kernel.sum()

            gasmass_convolved = gasmass  #scipy.ndimage.filters.gaussian_filter( gasmass, sigma_pixels)
            metalmass_convolved = metalmass  #scipy.ndimage.filters.gaussian_filter( metalmass, sigma_pixels)
            logtemp = np.log10(gastemp)
            covering_factor = 1.0  #0.33 #0.1 + 0.9*(logtemp - 2.5)/(6.1-2.5)   'MOD5'

            metallicity_zsolar = (metalmass_convolved /
                                  gasmass_convolved) / 0.02

            constantblock = covering_factor * ((1.0 + redshift)**b) * (
                (metallicity_zsolar)**
                s) * (metalmass_convolved / 0.02) * 4.28e-8

            for f in range(Nfilters):
                if (nonzero_gasmass_ind.shape)[0] < Npixels:
                    continue
                Alam = Alambda_eff[f]
                tau_f = (Alam) * constantblock
                block_dandanxu[f, :, :] = (camdata[f, :, :] / tau_f) * (
                    1.0 - np.exp(-1.0 * tau_f))
                block_gfs[f, :, :] = camdata[f, :, :] * np.exp(-1.0 * tau_f)
                if f == 6 or f == 2:
                    print(Alam, np.max(logtemp), np.min(logtemp),
                          np.max(covering_factor), np.min(covering_factor),
                          np.mean(tau_f), np.max(tau_f),
                          np.max(metallicity_zsolar))

        totalflux_dandanxu = totalflux_dandanxu + block_dandanxu
        totalflux_gfs = totalflux_gfs + block_gfs
        #print np.sum(totalflux), np.sum(totalflux_dandanxu)
        intSB[i, :] = np.sum(np.sum(camdata, axis=1), axis=1)
        cumSB[i, :] = np.sum(np.sum(totalflux, axis=1), axis=1)

        if has_aux == True:
            totalsm = totalsm + smdata * (pix_array[i]**2)  #want per pixel^2
            partialsm = partialsm + smdata * (pix_array[i]**2)
            totalsfr = totalsfr + sfrdata * (pix_array[i]**2
                                             )  #want per pixel^2
            partialsfr = partialsfr + sfrdata * (pix_array[i]**2)
            totalstellarmetals = totalstellarmetals + smetalsdata * (
                pix_array[i]**2)  #want per pixel^2
            partialstellarmetals = partialstellarmetals + smetalsdata * (
                pix_array[i]**2)
            sm_array[i] = np.sum(np.sum(smdata * (pix_array[i]**2), axis=0),
                                 axis=0)
            mets_array[i] = np.sum(np.sum(smetalsdata * (pix_array[i]**2),
                                          axis=0),
                                   axis=0)

        if (count % mod == 0) or (fn == bbfiles[-1]):
            #write partial flux
            slicecount = slicecount + 1
            imagefile = os.path.join(
                dirname, 'bbslice_{:04d}'.format(slicecount) + '.fits')
            primhdu = pyfits.PrimaryHDU(partialflux)
            primhdu.header['ZMIN'] = initz
            primhdu.header['ZMAX'] = redshift
            primhdu.name = 'sci'
            primhdu.header['IMUNIT'] = fluxunit
            #sechdu = pyfits.ImageHDU(totalflux)
            #sechdu.name = 'sci_cumulative'
            sechdu = pyfits.ImageHDU(partialsm)
            sechdu.name = 'STELLAR_MASS'
            sechdu.header['IMUNIT'] = stellarmassunit
            terhdu = pyfits.ImageHDU(partialstellarmetals)
            terhdu.name = 'STELLAR_METALS_MASS'
            terhdu.header['IMUNIT'] = metalsunit
            sfrhdu = pyfits.ImageHDU(partialsfr)
            sfrhdu.name = 'SFR'
            # Bug fix: the SFR unit belongs on sfrhdu, not sechdu.
            sfrhdu.header['IMUNIT'] = stellarmassunit + '/yr'

            tempzhdu = pyfits.ImageHDU(partialzweight)
            tempzhdu.name = 'ZWEIGHTSLICE'

            newlist = pyfits.HDUList(
                [primhdu, filhdu, sechdu, terhdu, tempzhdu, sfrhdu])

            newlist.writeto(imagefile, overwrite=True)
            partialflux = np.zeros_like(cambase)
            partialsm = np.zeros_like(singleslice)
            partialstellarmetals = np.zeros_like(singleslice)
            partialzweight = np.zeros_like(cambase)

        i = i + 1
        hdulist.close()

    primhdu = pyfits.PrimaryHDU(totalflux)
    primhdu.header['IMUNIT'] = fluxunit
    primhdu.header['PIXSCALE'] = pixel_fov_arcsec
    sfrhdu = pyfits.ImageHDU(totalsfr)
    sfrhdu.name = 'SFR'
    sfrhdu.header['IMUNIT'] = stellarmassunit + '/yr'
    sechdu = pyfits.ImageHDU(totalsm)
    sechdu.name = 'STELLAR_MASS'
    sechdu.header['IMUNIT'] = stellarmassunit
    terhdu = pyfits.ImageHDU(totalstellarmetals)
    terhdu.name = 'STELLAR_METALS_MASS'
    terhdu.header['IMUNIT'] = metalsunit
    dusthdu = pyfits.ImageHDU(totalflux_dandanxu)
    dusthdu.header['IMUNIT'] = fluxunit
    dusthdu.name = 'SEMIANALYTIC_DUST'
    gfshdu = pyfits.ImageHDU(totalflux_gfs)
    gfshdu.header['IMUNIT'] = fluxunit
    gfshdu.name = 'GFS_SCREEN_DUST'

    #zweightvalues = np.where(totalflux > 0.0, zweightslice/totalflux, cambasezeros - 1.0)
    zhdu = pyfits.ImageHDU(zweightslice)
    zhdu.name = 'WEIGHTED_Z'

    L_lambda_hdu = pyfits.ImageHDU(L_lambda)
    L_lambda_hdu.name = 'L_lambda'
    L_lambda_hdu.header['UNIT'] = L_lambda_unit
    L_lambda_ns_hdu = pyfits.ImageHDU(L_lambda_ns)
    L_lambda_ns_hdu.name = 'L_lambda_nonscatter'
    L_lambda_ns_hdu.header['UNIT'] = L_lambda_ns_unit
    L_lambda_out_hdu = pyfits.ImageHDU(L_lambda_out)
    L_lambda_out_hdu.name = 'L_lambda_out'
    L_lambda_out_hdu.header['UNIT'] = L_lambda_out_unit
    L_lambda_abs_hdu = pyfits.ImageHDU(L_lambda_abs)
    L_lambda_abs_hdu.name = 'L_lambda_absorption'
    L_lambda_abs_hdu.header['UNIT'] = L_lambda_abs_unit

    filhdu1 = pyfits.ImageHDU(lambda_eff)
    filhdu1.name = 'lambda_eff'
    filhdu1.header['UNIT'] = lambda_eff_unit
    filhdu2 = pyfits.ImageHDU(ewidth_lambda)
    filhdu2.name = 'ewidth_lambda'
    filhdu2.header['UNIT'] = ewidth_lambda_unit
    filhdu3 = pyfits.ImageHDU(ewidth_nu)
    filhdu3.name = 'ewidth_nu'
    filhdu3.header['UNIT'] = ewidth_nu_unit
    filhdu4 = pyfits.ImageHDU(L_lambda_eff)
    filhdu4.name = 'L_lambda_eff'
    filhdu4.header['UNIT'] = L_lambda_eff_unit
    filhdu5 = pyfits.ImageHDU(AB_mag)
    filhdu5.name = 'AB_mag'
    filhdu6 = pyfits.ImageHDU(L_lambda_eff_ns)
    filhdu6.name = 'L_lambda_eff_nonscatter'
    filhdu6.header['UNIT'] = L_lambda_eff_ns_unit
    filhdu7 = pyfits.ImageHDU(AB_mag_ns)
    filhdu7.name = 'AB_mag_nonscatter'
    filhdu8 = pyfits.ImageHDU(intSB)
    filhdu8.name = 'total_SB'
    filhdu8.header['UNIT'] = intSB_unit
    filhdu9 = pyfits.ImageHDU(cumSB)
    filhdu9.name = 'cumulative_SB'
    filhdu9.header['UNIT'] = cumSB_unit

    col1 = pyfits.Column(name='redshift', format='D', array=z_array)
    col2 = pyfits.Column(name='pixelscale',
                         format='D',
                         unit='kpc',
                         array=pix_array)
    col3 = pyfits.Column(name='cameradistance',
                         format='D',
                         unit='kpc',
                         array=camD_array)
    col4 = pyfits.Column(name='linearfov',
                         format='D',
                         unit='kpc',
                         array=fov_array)
    col5 = pyfits.Column(name='stellarmass',
                         format='D',
                         unit=stellarmassunit,
                         array=sm_array)
    col6 = pyfits.Column(name='stellarmetalsmass',
                         format='D',
                         unit=metalsunit,
                         array=mets_array)
    cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6])
    geometryhdu = pyfits.BinTableHDU.from_columns(cols)
    geometryhdu.name = 'GEOMETRY'

    newlist = pyfits.HDUList([
        primhdu, dusthdu, gfshdu, sechdu, terhdu, sfrhdu, zhdu, geometryhdu,
        lamhdu, L_lambda_hdu, L_lambda_ns_hdu, L_lambda_out_hdu,
        L_lambda_abs_hdu, filname_hdu, filhdu1, filhdu2, filhdu3, filhdu4,
        filhdu5, filhdu6, filhdu7, filhdu8, filhdu9
    ])
    newlist.writeto(os.path.join(dirname, 'bbdata.fits'), overwrite=True)

    result = write_filtered_images(totalflux,
                                   filter_hdu,
                                   dirname=dirname,
                                   imunit=fluxunit)

    subprocess.call(['cp', 'sfrhist_base.stub', dirname])
    subprocess.call(['cp', 'mcrx_base.stub', dirname])
    subprocess.call(['cp', 'broadband_base.stub', dirname])
    subprocess.call(['cp', 'simpar', dirname])

    return totalflux
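
A hedged usage sketch, assuming the module-level imports this function relies on (np, pyfits, glob, os, math, scipy, asciitable, subprocess) and a working directory containing Sunrise broadband*.fits outputs:

totalflux = add_all_broadband_flux(dirname='SCI_IMAGES', mod=10, has_aux=True)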
Example #6
def make_savefile(simname, haloname, simdir, DD, ds, ad):
    DDname = 'DD%.4i' % DD

    mss = ad['stars', 'particle_mass'].in_units('Msun')
    msd = ad['darkmatter', 'particle_mass'].in_units('Msun')

    ags = ad['stars', 'age'].in_units('Gyr')
    agd = ad['darkmatter', 'age'].in_units('Gyr')

    xs_box = ad['stars', 'particle_position_x'].in_units('kpc')
    ys_box = ad['stars', 'particle_position_y'].in_units('kpc')
    zs_box = ad['stars', 'particle_position_z'].in_units('kpc')
    vxs_box = ad['stars', 'particle_velocity_x'].in_units('km/s')
    vys_box = ad['stars', 'particle_velocity_y'].in_units('km/s')
    vzs_box = ad['stars', 'particle_velocity_z'].in_units('km/s')

    xd_box = ad['darkmatter', 'particle_position_x'].in_units('kpc')
    yd_box = ad['darkmatter', 'particle_position_y'].in_units('kpc')
    zd_box = ad['darkmatter', 'particle_position_z'].in_units('kpc')
    vxd_box = ad['darkmatter', 'particle_velocity_x'].in_units('km/s')
    vyd_box = ad['darkmatter', 'particle_velocity_y'].in_units('km/s')
    vzd_box = ad['darkmatter', 'particle_velocity_z'].in_units('km/s')

    ids = ad['stars', 'particle_index']
    idd = ad['darkmatter', 'particle_index']

    hdus = []
    prim_hdu = fits.PrimaryHDU()
    hdus.append(prim_hdu)

    colss = fits.ColDefs([
        fits.Column(name='id', array=ids, format='D'),
        fits.Column(name='mass', array=mss, format='D'),
        fits.Column(name='x_box  ', array=xs_box, format='D'),
        fits.Column(name='y_box  ', array=ys_box, format='D'),
        fits.Column(name='z_box  ', array=zs_box, format='D'),
        fits.Column(name='vx_box ', array=vxs_box, format='D'),
        fits.Column(name='vy_box ', array=vys_box, format='D'),
        fits.Column(name='vz_box ', array=vzs_box, format='D'),
        fits.Column(name='age', array=ags, format='D'),
    ])

    colsd = fits.ColDefs([
        fits.Column(name='id', array=idd, format='D'),
        fits.Column(name='mass', array=msd, format='D'),
        fits.Column(name='x_box  ', array=xd_box, format='D'),
        fits.Column(name='y_box  ', array=yd_box, format='D'),
        fits.Column(name='z_box  ', array=zd_box, format='D'),
        fits.Column(name='vx_box ', array=vxd_box, format='D'),
        fits.Column(name='vy_box ', array=vyd_box, format='D'),
        fits.Column(name='vz_box ', array=vzd_box, format='D'),
        fits.Column(name='age', array=agd, format='D'),
    ])

    hdus.append(fits.BinTableHDU.from_columns(colss, name='stars'))
    hdus.append(fits.BinTableHDU.from_columns(colsd, name='dark'))

    hdus_fits = fits.HDUList(hdus)
    hdus_fits.writeto(
        '/nobackupp2/rcsimons/foggie_momentum/particles/%s/%s/%s_DD%.4i_particles.fits'
        % (haloname, simname, simname, DD),
        overwrite=True)
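
A hedged sketch of reading the particle tables back (the filename stands in for the path written above; extension lookup by name is case-insensitive in astropy):

from astropy.io import fits

particle_file = 'simname_DD0000_particles.fits'  # illustrative name
with fits.open(particle_file) as hdul:
    stars = hdul['STARS'].data
    dark = hdul['DARK'].data
    print(stars['mass'][:5], dark['mass'][:5])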
Example #7
        col6 = fits.Column(name='star name (TYC)',
                           format='20A',
                           array=np.array(six))
        col7 = fits.Column(name='field id',
                           format='20A',
                           array=np.array(seven))
        col8 = fits.Column(name='image url',
                           format='55A',
                           array=np.array(eight))
        col9 = fits.Column(name='r (pix)', format='E', array=np.array(nine))
        col10 = fits.Column(name='r (arcsec)', format='E', array=np.array(ten))
        col11 = fits.Column(name='no Vmag data',
                            format='5A',
                            array=np.array(eleven))
        cols = fits.ColDefs([
            col1, col2, col7, col6, col3, col4, col5, col8, col9, col10, col11
        ])
        tbhdu = fits.BinTableHDU.from_columns(cols)
        tbhdu.writeto('HEARTStars' + str(magMin) + '-' + str(magMax) +
                      '_PART.fits',
                      overwrite=True)
        save = save + 1000

    j += 1

#When all stars and images are tested, all cases where a star
#falls on or near an image are stored to a fits table.
col1 = fits.Column(name='run', format='20A', array=np.array(one))
col2 = fits.Column(name='ccd', format='20A', array=np.array(two))
col3 = fits.Column(name='star mag', format='20A', array=np.array(three))
col4 = fits.Column(name='star ra', format='E', array=np.array(four))
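
The snippet is truncated, so the final write never appears; a hedged sketch of what it presumably looks like, mirroring the per-chunk write above (col5 through col11 are assumed to be rebuilt the same way as col1 through col4):

cols = fits.ColDefs([
    col1, col2, col7, col6, col3, col4, col5, col8, col9, col10, col11
])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.writeto('HEARTStars' + str(magMin) + '-' + str(magMax) + '.fits',
              overwrite=True)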
Example #8
def desi_qso_templates(z_wind=0.2, zmnx=(0.4,4.), outfil=None, N_perz=500,
                       boss_pca_fil=None, wvmnx=(3500., 10000.),
                       rebin_wave=None, rstate=None,
                       sdss_pca_fil=None, no_write=False, redshift=None,
                       seed=None, old_read=False, ipad=40, cosmo=None):
    """ Generate QSO templates for DESI

    Rebins to input wavelength array (or log10 in wvmnx)

    Parameters
    ----------
    z_wind : float, optional
      Window for sampling PCAs
    zmnx : tuple, optional
      Min/max for generation
    N_perz : int, optional
      Number of draws per redshift window
    old_read : bool, optional
      Read the files the old way
    seed : int, optional
      Seed for the random number state
    rebin_wave : ndarray, optional
      Input wavelengths for rebinning
    wvmnx : tuple, optional
      Wavelength limits for rebinning (not used with rebin_wave)
    redshift : ndarray, optional
      Redshifts desired for the templates
    ipad : int, optional
      Padding for enabling enough models
    cosmo : astropy.cosmology.core, optional
       Cosmology instantiation from astropy.cosmology
    Returns
    -------
    wave : ndarray
      Wavelengths that the spectra were rebinned to
    flux : ndarray (2D; flux vs. model)
    z : ndarray
      Redshifts
    """


    # Cosmology
    if cosmo is None:
        from astropy import cosmology
        cosmo = cosmology.FlatLambdaCDM(H0=70., Om0=0.3)

    if old_read:
        # PCA values
        if boss_pca_fil is None:
            boss_pca_fil = 'BOSS_DR10Lya_PCA_values_nocut.fits.gz'
        hdu = fits.open(boss_pca_fil)
        boss_pca_coeff = hdu[1].data

        if sdss_pca_fil is None:
            sdss_pca_fil = 'SDSS_DR7Lya_PCA_values_nocut.fits.gz'
        hdu2 = fits.open(sdss_pca_fil)
        sdss_pca_coeff = hdu2[1].data

        # Open the BOSS catalog file
        boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
        bcat_hdu = fits.open(boss_cat_fil)
        t_boss = bcat_hdu[1].data
        boss_zQSO = t_boss['z_pipe']

        # Open the SDSS catalog file
        sdss_cat_fil = os.environ.get('SDSSPATH')+'/DR7_QSO/dr7_qso.fits.gz'
        scat_hdu = fits.open(sdss_cat_fil)
        t_sdss = scat_hdu[1].data
        sdss_zQSO = t_sdss['z']
        if len(sdss_pca_coeff) != len(sdss_zQSO):
            print('Need to finish running the SDSS models!')
            sdss_zQSO = sdss_zQSO[0:len(sdss_pca_coeff)]
        # Eigenvectors
        eigen, eigen_wave = fbq.read_qso_eigen()
    else:
        infile = desisim.io.find_basis_template('qso')
        with fits.open(infile) as hdus:
            hdu_names = [hdus[ii].name for ii in range(len(hdus))]
            boss_pca_coeff = hdus[hdu_names.index('BOSS_PCA')].data
            sdss_pca_coeff = hdus[hdu_names.index('SDSS_PCA')].data
            boss_zQSO = hdus[hdu_names.index('BOSS_Z')].data
            sdss_zQSO = hdus[hdu_names.index('SDSS_Z')].data
            eigen = hdus[hdu_names.index('SDSS_EIGEN')].data
            eigen_wave = hdus[hdu_names.index('SDSS_EIGEN_WAVE')].data

    # Fiddle with the eigen-vectors
    npix = len(eigen_wave)
    chkpix = np.where((eigen_wave > 900.) & (eigen_wave < 5000.) )[0]
    lambda_912 = 911.76
    pix912 = np.argmin( np.abs(eigen_wave-lambda_912) )

    # Loop on redshift windows.
    if redshift is None:
        z0 = np.arange(zmnx[0],zmnx[1],z_wind)
        z1 = z0 + z_wind
    else:
        if np.isscalar(redshift):
            z0 = np.array([redshift])
        else:
            z0 = redshift.copy()
        z1 = z0.copy() #+ z_wind


    pca_list = ['PCA0', 'PCA1', 'PCA2', 'PCA3']
    PCA_mean = np.zeros(4)
    PCA_sig = np.zeros(4)
    PCA_rand = np.zeros((4,N_perz*ipad))

    final_spec = np.zeros((npix, N_perz * len(z0)))
    final_wave = np.zeros((npix, N_perz * len(z0)))
    final_z = np.zeros(N_perz * len(z0))

    # Random state
    if rstate is None:
        rstate = np.random.RandomState(seed)

    for ii in range(len(z0)):

        # BOSS or SDSS?
        if z0[ii] > 2.15:
            zQSO = boss_zQSO
            pca_coeff = boss_pca_coeff
        else:
            zQSO = sdss_zQSO
            pca_coeff = sdss_pca_coeff

        # Random z values and wavelengths
        zrand = rstate.uniform( z0[ii], z1[ii], N_perz*ipad)
        wave = np.outer(eigen_wave, 1+zrand)

        # MFP (Worseck+14)
        mfp = 37. * ( (1+zrand)/5. )**(-5.4) # Physical Mpc

        # Grab PCA mean + sigma
        if redshift is None:
            idx = np.where( (zQSO >= z0[ii]) & (zQSO < z1[ii]) )[0]
        else:
            # Hack by @moustakas: add a little jitter to get the set of QSOs
            # that are *nearest* in redshift to the desired output redshift.
            idx = np.where( (zQSO >= z0[ii]-0.01) & (zQSO < z1[ii]+0.01) )[0]
            if len(idx) == 0:
                idx = np.array([(np.abs(zQSO-zrand[0])).argmin()])
                #pdb.set_trace()
        log.debug('Making z=({:g},{:g}) with {:d} input quasars'.format(z0[ii],z1[ii],len(idx)))

        # Get PCA stats and random values
        for jj,ipca in enumerate(pca_list):
            if jj == 0:  # Use bounds for PCA0 [avoids negative values]
                xmnx = perc(pca_coeff[ipca][idx], per=95)
                PCA_rand[jj, :] = rstate.uniform(xmnx[0], xmnx[1], N_perz*ipad)
            else:
                PCA_mean[jj] = np.mean(pca_coeff[ipca][idx])
                PCA_sig[jj] = np.std(pca_coeff[ipca][idx])
                # Draws
                PCA_rand[jj, :] = rstate.uniform( PCA_mean[jj] - 2*PCA_sig[jj],
                                        PCA_mean[jj] + 2*PCA_sig[jj], N_perz*ipad)

        # Generate the templates (ipad*N_perz)
        spec = np.dot(eigen.T, PCA_rand)

        # Take first good N_perz

        # Truncate, MFP, Fill
        ngd = 0
        nbad = 0
        for kk in range(ipad*N_perz):
            # Any zero values?
            mn = np.min(spec[chkpix, kk])
            if mn < 0.:
                nbad += 1
                continue

            # MFP
            if z0[ii] > 2.39:
                z912 = wave[0:pix912,kk]/lambda_912 - 1.
                phys_dist = np.fabs( cosmo.lookback_distance(z912) -
                                cosmo.lookback_distance(zrand[kk]) ) # Mpc
                spec[0:pix912, kk] = spec[0:pix912,kk] * np.exp(-phys_dist.value/mfp[kk])

            # Write
            final_spec[:, ii*N_perz+ngd] = spec[:,kk]
            final_wave[:, ii*N_perz+ngd] = wave[:,kk]
            final_z[ii*N_perz+ngd] = zrand[kk]
            ngd += 1
            if ngd == N_perz:
                break
        if ngd != N_perz:
            print('Did not make enough!')
            #pdb.set_trace()
            log.warning('Did not make enough qso templates. ngd = {}, N_perz = {}'.format(ngd,N_perz))

    # Rebin
    if rebin_wave is None:
        light = C_LIGHT        # [km/s]
        velpixsize = 10.            # [km/s]
        pixsize = velpixsize/light/np.log(10) # [pixel size in log-10 A]
        minwave = np.log10(wvmnx[0])          # minimum wavelength [log10-A]
        maxwave = np.log10(wvmnx[1])          # maximum wavelength [log10-A]
        r_npix = int(np.round((maxwave-minwave)/pixsize+1))

        log_wave = minwave+np.arange(r_npix)*pixsize # constant log-10 spacing
    else:
        log_wave = np.log10(rebin_wave)
        r_npix = len(log_wave)

    totN = N_perz * len(z0)
    rebin_spec = np.zeros((r_npix, totN))


    for ii in range(totN):
        # Interpolate (in log space)
        rebin_spec[:, ii] = resample_flux(log_wave, np.log10(final_wave[:, ii]), final_spec[:, ii])
        #f1d = interp1d(np.log10(final_wave[:,ii]), final_spec[:,ii])
        #rebin_spec[:,ii] = f1d(log_wave)

    if outfil is None:
        return 10.**log_wave, rebin_spec, final_z

    # Transpose for consistency
    out_spec = np.array(rebin_spec.T, dtype='float32')

    # Write
    hdu = fits.PrimaryHDU(out_spec)
    hdu.header.set('PROJECT', 'DESI QSO TEMPLATES')
    hdu.header.set('VERSION', '1.1')
    hdu.header.set('OBJTYPE', 'QSO')
    hdu.header.set('DISPAXIS',  1, 'dispersion axis')
    hdu.header.set('CRPIX1',  1, 'reference pixel number')
    hdu.header.set('CRVAL1',  minwave, 'reference log10(Ang)')
    hdu.header.set('CDELT1',  pixsize, 'delta log10(Ang)')
    hdu.header.set('LOGLAM',  1, 'log10 spaced wavelengths?')
    hdu.header.set('AIRORVAC', 'vac', ' wavelengths in vacuum (vac) or air')
    hdu.header.set('VELSCALE', velpixsize, ' pixel size in km/s')
    hdu.header.set('WAVEUNIT', 'Angstrom', ' wavelength units')
    hdu.header.set('BUNIT', '1e-17 erg/s/cm2/A', ' flux unit')

    idval = list(range(totN))
    col0 = fits.Column(name=str('TEMPLATEID'),format=str('J'), array=idval)
    col1 = fits.Column(name=str('Z'),format=str('E'),array=final_z)
    cols = fits.ColDefs([col0, col1])
    tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.header.set('EXTNAME','METADATA')

    hdulist = fits.HDUList([hdu, tbhdu])
    hdulist.writeto(outfil, overwrite=True)

    return final_wave, final_spec, final_z
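
A hedged usage sketch using the in-memory return path (`outfil=None`); a small N_perz keeps the run quick:

wave, flux, z = desi_qso_templates(zmnx=(0.5, 1.0), N_perz=20, seed=42)
print(wave.shape, flux.shape, z.shape)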
Example #9
def write_fits_spec(filename,
                    wavelengths,
                    fluxes,
                    pri_header={},
                    ext_header={},
                    overwrite=False,
                    trim_zero=True,
                    pad_zero_ends=True,
                    precision=None,
                    epsilon=0.00032,
                    wave_col='WAVELENGTH',
                    flux_col='FLUX',
                    wave_unit=u.AA,
                    flux_unit=units.FLAM):
    """Write FITS spectrum.

    .. warning::

        If data is being written out as single-precision but wavelengths
        are in double-precision, some rows may be omitted.

    Parameters
    ----------
    filename : str
        Output spectrum filename.

    wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity`
        Wavelength and flux of the spectrum.

    pri_header, ext_header : dict
        Metadata to be added to primary and given extension FITS header,
        respectively. Do *not* use this to define column names and units.

    overwrite : bool
        Overwrite existing file. Defaults to `False`.

    trim_zero : bool
        Remove rows with zero-flux. Default is `True`.

    pad_zero_ends : bool
        Pad each end of the spectrum with a row of zero flux
        like :func:`synphot.spectrum.BaseSpectrum.taper`.
        This is unnecessary if input is already tapered.

    precision : {`None`, 'single', 'double'}
        Precision of values in output file.
        Use native flux precision by default.

    epsilon : float
        Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ.
        This is the minimum separation in wavelengths necessary for SYNPHOT
        to read the entries as distinct single-precision numbers.
        This is *only* used if ``precision='single'`` but data are in
        double-precision. Default from the FAQ is 0.00032.

    wave_col, flux_col : str
        Wavelength and flux column names (case-insensitive).

    wave_unit, flux_unit : str or `~astropy.units.core.Unit`
        Wavelength and flux units, which default to Angstrom and FLAM,
        respectively. These are *only* used if wavelengths and fluxes
        are not in astropy quantities.

    Raises
    ------
    synphot.exceptions.SynphotError
        Wavelengths and fluxes have different shapes or value precision
        is not supported.

    """
    if isinstance(wavelengths, u.Quantity):
        wave_unit = wavelengths.unit
        wave_value = wavelengths.value
    else:
        wave_value = wavelengths

    if isinstance(fluxes, u.Quantity):
        flux_unit = fluxes.unit
        flux_value = fluxes.value
    else:
        flux_value = fluxes

    wave_unit = units.validate_unit(wave_unit).to_string().upper()
    flux_unit = units.validate_unit(flux_unit).to_string().upper()

    if wave_value.shape != flux_value.shape:
        raise exceptions.SynphotError(
            'Wavelengths have shape {0} but fluxes have shape {1}'.format(
                wave_value.shape, flux_value.shape))

    # Remove rows with zero flux. Putting this before precision logic to avoid
    # keeping duplicate wavelengths with zero flux.
    if trim_zero:
        orig_size = wave_value.size
        idx = np.where(flux_value != 0)
        wave_value = wave_value[idx]
        flux_value = flux_value[idx]

        n_thrown = orig_size - wave_value.size
        if n_thrown != 0:
            log.info('{0} zero-flux rows are thrown out'.format(n_thrown))

    # Only these Numpy types are supported
    #    'f'   np.float32
    #    'd'   np.float64
    pcodes = {'d': 'D', 'f': 'E'}  # Numpy to FITS conversion

    # Use native flux precision
    if precision is None:
        precision = flux_value.dtype.char
        if precision not in pcodes:
            raise exceptions.SynphotError('flux is not float32 or float64')

    # Use user specified precision
    else:
        precision = precision.lower()
        if precision == 'single':
            precision = 'f'
        elif precision == 'double':
            precision = 'd'
        else:
            raise exceptions.SynphotError('precision must be single or double')

    # Now check wavelength precision
    wave_precision = wave_value.dtype.char
    if wave_precision not in pcodes:
        raise exceptions.SynphotError('wavelength is not float32 or float64')

    # If wavelength is double-precision but data is written out as
    # single-precision, wavelength values have to be recalculated
    # so that they will still be sorted with no duplicates.
    if wave_precision == 'd' and precision == 'f':
        orig_size = wave_value.size
        idx = np.where(np.abs(wave_value[1:] - wave_value[:-1]) > epsilon)
        wave_value = np.append(wave_value[idx], wave_value[-1])
        flux_value = np.append(flux_value[idx], flux_value[-1])
        n_thrown = orig_size - wave_value.size
        if n_thrown != 0:
            warnings.warn(
                '{0} rows are thrown out in converting wavelengths from '
                'double- to single-precision'.format(n_thrown),
                AstropyUserWarning)

    # Keep one zero at each end
    if pad_zero_ends:
        w1 = wave_value[0]**2 / wave_value[1]
        w2 = wave_value[-1]**2 / wave_value[-2]
        wave_value = np.insert(wave_value, [0, wave_value.size], [w1, w2])
        flux_value = np.insert(flux_value, [0, flux_value.size], [0.0, 0.0])

    # Construct the columns
    cw = fits.Column(name=wave_col,
                     array=wave_value,
                     unit=wave_unit,
                     format=pcodes[precision])
    cf = fits.Column(name=flux_col,
                     array=flux_value,
                     unit=flux_unit,
                     format=pcodes[precision])

    # These are written to the primary header:
    #   1. Filename
    #   2. Origin
    #   3. User dictionary (can overwrite defaults)
    hdr_hdu = fits.PrimaryHDU()
    hdr_hdu.header['filename'] = (os.path.basename(filename), 'name of file')
    hdr_hdu.header['origin'] = ('synphot', 'Version {0}'.format(__version__))
    for key, val in pri_header.items():
        hdr_hdu.header[key] = val

    # Make the extension HDU and include user dictionary in extension header.
    tab_hdu = fits.BinTableHDU.from_columns(fits.ColDefs([cw, cf]))
    for key, val in ext_header.items():
        tab_hdu.header[key] = val

    # Write to file
    hdulist = fits.HDUList([hdr_hdu])
    hdulist.append(tab_hdu)
    hdulist.writeto(filename, overwrite=overwrite)
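
A hedged usage sketch with astropy quantities, in which case the wavelength and flux units are taken from the inputs (`units` is `synphot.units`, as imported by the surrounding module; the filename is illustrative):

import numpy as np
from astropy import units as u

wav = np.linspace(1000.0, 2000.0, 100) * u.AA
flx = np.ones(100) * units.FLAM
write_fits_spec('spec.fits', wav, flx, overwrite=True)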
Example #10
def kepextract(infile,
               outfile,
               maskfile='ALL',
               bkg=False,
               psfcentroid=False,
               overwrite=False,
               verbose=False,
               logfile='kepextract.log'):
    """
    kepextract -- create a light curve from a target pixel file by summing
    user-selected pixels

    kepextract calculates simple aperture photometry, from a target pixel file,
    for a user-supplied set of pixels. The Kepler pipeline sums a specific set
    of pixels to produce the standard light curves delivered to users. Termed
    the optimal aperture, the default pixel set is designed to maximize the
    signal-to-noise ratio of the resulting light curve, optimizing for transit
    detection. This tool provides users with a straightforward capability to
    alter the summed pixel set. Applications include:

        * Use of all pixels in the aperture

        * The pipeline does not produce a light curve for sources observed with
          custom or dedicated pixel masks. The user can create a light curve for
          these sources using kepextract.

        * Construction of pixel light curves, in which the time series for a
          single pixel can be examined.

        * Light curves for extended sources which may be poorly sampled by the
          optimal aperture.

    Parameters
    ----------
    infile : str
        Filename for the target pixel file.
    outfile : str
        Filename for the output light curve. This product will be written to
        the same FITS format as archived light curves.
    maskfile : str
        This string can be one of three options::

            * 'ALL' tells the task to calculate principal components from all
            pixels within the pixel mask stored in the input file.

            * 'APER' tells the task to calculate principal components from only
            the pixels within the photometric aperture stored in the input file
            (e.g. only those pixels summed by the Kepler pipeline to produce
            the light curve archived at MAST).

            * A filename describing the desired photometric aperture. Such a
            file can be constructed using the kepmask or kepffi tools, or can
            be created manually using the format described in the documentation
            for those tools.
    bkg : bool
        Option to subtract an estimate of the background. Background is
        calculated by identifying the median pixel value for each exposure.
        This method requires an adequate number of pixels within the target
        mask that contain background and negligible source flux. Note that
        background has already been subtracted from calibrated Kepler Target
        Pixel Files, but not early campaign data from the K2 mission.
    psfcentroid : bool
        Measure the star's position by fitting a 2D Gaussian PSF to the pixels
        in the mask. This will populate values for PSF_CENTR1 (column position)
        and PSF_CENTR2 (row position) in the output file.
    overwrite : bool
        Overwrite the output file?
    verbose : bool
        Option for verbose mode, in which informative messages and warnings
        are printed to the shell and a logfile.
    logfile : str
        Name of the logfile containing error and warning messages.

    Examples
    --------
    .. code-block:: bash

        $ kepextract kplr008256049-2010174085026_lpd-targ.fits outlc.fits --maskfile ALL

    One further can plot the resulted light curve by doing

    .. code-block:: python

        import matplotlib.pyplot as plt
        from astropy.io import fits

        f = fits.open('outlc.fits')
        plt.plot(f[1].data['TIME'], f[1].data['SAP_FLUX'])

    or

    .. code-block:: bash

        $ kepdraw outlc.fits

    .. image:: ../_static/images/api/kepextract.png
        :align: center
    """

    # log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = ('KEPEXTRACT -- ' + ' infile={}'.format(infile) +
            ' maskfile={}'.format(maskfile) + ' outfile={}'.format(outfile) +
            ' background={}'.format(bkg) +
            ' psfcentroid={}'.format(psfcentroid) +
            ' overwrite={}'.format(overwrite) + ' verbose={}'.format(verbose) +
            ' logfile={}'.format(logfile))
    kepmsg.log(logfile, call + '\n', verbose)

    # start time
    kepmsg.clock('KEPEXTRACT started at', logfile, verbose)
    # overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        errmsg = (
            'ERROR -- KEPEXTRACT: {} exists. Use --overwrite'.format(outfile))
        kepmsg.err(logfile, errmsg, verbose)

    # open input file
    instr = pyfits.open(infile, mode='readonly', memmap=True)
    tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
                                                    verbose)

    # fudge non-compliant FITS keywords with no values
    instr = kepkey.emptykeys(instr, infile, logfile, verbose)

    # input file data
    cards0 = instr[0].header.cards
    cards1 = instr[1].header.cards
    cards2 = instr[2].header.cards
    table = instr[1].data[:]
    maskmap = copy(instr[2].data)

    print("Extracting information from Target Pixel File...")

    # input table data
    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, time = \
        kepio.readTPF(infile, 'TIME', logfile, verbose)
    time = np.array(time, dtype='float64')

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, timecorr = \
        kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
    timecorr = np.array(timecorr, dtype='float32')

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, cadenceno = \
        kepio.readTPF(infile, 'CADENCENO', logfile, verbose)
    cadenceno = np.array(cadenceno, dtype='int')

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, raw_cnts = \
        kepio.readTPF(infile, 'RAW_CNTS', logfile, verbose)

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, flux = \
        kepio.readTPF(infile, 'FLUX', logfile, verbose)

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, flux_err = \
        kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, flux_bkg = \
        kepio.readTPF(infile, 'FLUX_BKG', logfile, verbose)

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err = \
        kepio.readTPF(infile, 'FLUX_BKG_ERR', logfile, verbose)

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, cosmic_rays = \
        kepio.readTPF(infile, 'COSMIC_RAYS', logfile, verbose)

    kepid, channel, skygroup, module, output, quarter, season, \
    ra, dec, column, row, kepmag, xdim, ydim, quality = \
        kepio.readTPF(infile, 'QUALITY', logfile, verbose)
    quality = np.array(quality, dtype='int')

    try:
        #  ---for FITS wave #2
        pos_corr1 = np.array(table.field('POS_CORR1'), dtype='float64')
    except KeyError:
        pos_corr1 = np.empty(len(time))
        # ---temporary before FITS wave #2
        pos_corr1[:] = np.nan
    try:
        #  ---for FITS wave #2
        pos_corr2 = np.array(table.field('POS_CORR2'), dtype='float64')
    except KeyError:
        pos_corr2 = np.empty(len(time))
        # ---temporary before FITS wave #2
        pos_corr2[:] = np.nan

    # dummy columns for output file
    psf_centr1 = np.empty(len(time))
    psf_centr1[:] = np.nan
    psf_centr2 = np.empty(len(time))
    psf_centr2[:] = np.nan
    mom_centr1 = np.empty(len(time))
    mom_centr1[:] = np.nan
    mom_centr2 = np.empty(len(time))
    mom_centr2[:] = np.nan
    psf_centr1_err = np.empty(len(time))
    psf_centr1_err[:] = np.nan
    psf_centr2_err = np.empty(len(time))
    psf_centr2_err[:] = np.nan
    mom_centr1_err = np.empty(len(time))
    mom_centr1_err[:] = np.nan
    mom_centr2_err = np.empty(len(time))
    mom_centr2_err[:] = np.nan

    # read mask definition file
    if 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
        maskx = np.array([], 'int')
        masky = np.array([], 'int')
        lines = kepio.openascii(maskfile, 'r', logfile, verbose)
        for line in lines:
            line = line.strip().split('|')
            if len(line) == 6:
                y0 = int(line[3])
                x0 = int(line[4])
                line = line[5].split(';')
                for items in line:
                    try:
                        masky = np.append(masky, y0 + int(items.split(',')[0]))
                        maskx = np.append(maskx, x0 + int(items.split(',')[1]))
                    except:
                        continue
        kepio.closeascii(lines, logfile, verbose)
        if len(maskx) == 0 or len(masky) == 0:
            errmsg = 'ERROR -- KEPEXTRACT: {} contains no pixels.'.format(
                maskfile)
            kepmsg.err(logfile, errmsg, verbose)

    # subimage physical WCS data
    crpix1p = cards2['CRPIX1P'].value
    crpix2p = cards2['CRPIX2P'].value
    crval1p = cards2['CRVAL1P'].value
    crval2p = cards2['CRVAL2P'].value
    cdelt1p = cards2['CDELT1P'].value
    cdelt2p = cards2['CDELT2P'].value

    # define new subimage bitmap...
    if 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
        aperx = np.array([], 'int')
        apery = np.array([], 'int')
        aperb = np.array([], 'int')
        for i in range(maskmap.shape[0]):
            for j in range(maskmap.shape[1]):
                aperx = np.append(aperx, crval1p + (j + 1 - crpix1p) * cdelt1p)
                apery = np.append(apery, crval2p + (i + 1 - crpix2p) * cdelt2p)
                if maskmap[i, j] == 0:
                    aperb = np.append(aperb, 0)
                else:
                    aperb = np.append(aperb, 1)
                    maskmap[i, j] = 1
                    for k in range(len(maskx)):
                        if aperx[-1] == maskx[k] and apery[-1] == masky[k]:
                            aperb[-1] = 3
                            maskmap[i, j] = 3

    # trap case where no aperture needs to be defined but pixel positions are
    # still required for centroiding
    if maskfile.lower() == 'all':
        aperx = np.array([], 'int')
        apery = np.array([], 'int')
        aperb = np.array([], 'int')
        for i in range(maskmap.shape[0]):
            for j in range(maskmap.shape[1]):
                aperx = np.append(aperx, crval1p + (j + 1 - crpix1p) * cdelt1p)
                apery = np.append(apery, crval2p + (i + 1 - crpix2p) * cdelt2p)
                if maskmap[i, j] == 0:
                    aperb = np.append(aperb, 0)
                else:
                    aperb = np.append(aperb, 3)
                    maskmap[i, j] = 3

    # ...or use old subimage bitmap
    if 'aper' in maskfile.lower():
        aperx = np.array([], 'int')
        apery = np.array([], 'int')
        aperb = np.array([], 'int')
        for i in range(maskmap.shape[0]):
            for j in range(maskmap.shape[1]):
                aperb = np.append(aperb, maskmap[i, j])
                aperx = np.append(aperx, crval1p + (j + 1 - crpix1p) * cdelt1p)
                apery = np.append(apery, crval2p + (i + 1 - crpix2p) * cdelt2p)
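
    # note on the bitmap convention built above: 0 marks pixels outside the
    # collected stamp, 1 marks collected pixels outside the photometric
    # aperture, and 3 marks pixels inside it; e.g. a 3x3 stamp with a
    # single-pixel central aperture is [[1, 1, 1], [1, 3, 1], [1, 1, 1]]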

    # subtract median pixel value for background?
    sky = np.zeros(len(time), 'float32')
    if bkg:
        for i in range(len(time)):
            sky[i] = np.median(flux[i, :])

    # legal mask defined?
    if len(aperb) == 0:
        errmsg = ('ERROR -- KEPEXTRACT: no legal pixels within the subimage'
                  ' are defined.')
        kepmsg.err(logfile, errmsg, verbose)

    # construct new table flux data
    naper = (aperb == 3).sum()
    ntime = len(time)
    sap_flux = np.array([], 'float32')
    sap_flux_err = np.array([], 'float32')
    sap_bkg = np.array([], 'float32')
    sap_bkg_err = np.array([], 'float32')
    raw_flux = np.array([], 'float32')
    print("Aperture photometry...")
    for i in tqdm(range(len(time))):
        work1 = np.array([], 'float64')
        work2 = np.array([], 'float64')
        work3 = np.array([], 'float64')
        work4 = np.array([], 'float64')
        work5 = np.array([], 'float64')
        for j in range(len(aperb)):
            if aperb[j] == 3:
                work1 = np.append(work1, flux[i, j] - sky[i])
                work2 = np.append(work2, flux_err[i, j])
                work3 = np.append(work3, flux_bkg[i, j])
                work4 = np.append(work4, flux_bkg_err[i, j])
                work5 = np.append(work5, raw_cnts[i, j])
        sap_flux = np.append(sap_flux, np.sum(work1))
        sap_flux_err = np.append(sap_flux_err,
                                 math.sqrt(np.sum(work2 * work2)))
        sap_bkg = np.append(sap_bkg, np.sum(work3))
        sap_bkg_err = np.append(sap_bkg_err, math.sqrt(np.sum(work4 * work4)))
        raw_flux = np.append(raw_flux, np.sum(work5))

    print("Sample moments...")
    # construct new table moment data
    for i in tqdm(range(ntime)):
        xf = np.zeros(shape=(naper))
        yf = np.zeros(shape=(naper))
        f = np.zeros(shape=(naper))
        xfe = np.zeros(shape=(naper))
        yfe = np.zeros(shape=(naper))
        fe = np.zeros(shape=(naper))
        k = -1
        for j in range(len(aperb)):
            if aperb[j] == 3:
                k += 1
                xf[k] = aperx[j] * flux[i, j]
                xfe[k] = aperx[j] * flux_err[i, j]
                yf[k] = apery[j] * flux[i, j]
                yfe[k] = apery[j] * flux_err[i, j]
                f[k] = flux[i, j]
                fe[k] = flux_err[i, j]
        xfsum = np.sum(xf)
        yfsum = np.sum(yf)
        fsum = np.sum(f)
        xfsume = math.sqrt(np.sum(xfe * xfe) / naper)
        yfsume = math.sqrt(np.sum(yfe * yfe) / naper)
        fsume = math.sqrt(np.sum(fe * fe) / naper)
        mom_centr1[i] = xfsum / fsum
        mom_centr2[i] = yfsum / fsum
        mom_centr1_err[i] = math.sqrt((xfsume / xfsum)**2 +
                                      ((fsume / fsum)**2))
        mom_centr2_err[i] = math.sqrt((yfsume / yfsum)**2 +
                                      ((fsume / fsum)**2))
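    # the quadrature sums above give fractional errors; rescaling by the
    # centroid values below converts them back to pixels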
    mom_centr1_err = mom_centr1_err * mom_centr1
    mom_centr2_err = mom_centr2_err * mom_centr2

    if psfcentroid:
        print("PSF Centroiding...")
        # construct new table PSF data
        psf_centr1 = np.zeros(shape=(ntime))
        psf_centr2 = np.zeros(shape=(ntime))
        psf_centr1_err = np.zeros(shape=(ntime))
        psf_centr2_err = np.zeros(shape=(ntime))
        modx = np.zeros(shape=(naper))
        mody = np.zeros(shape=(naper))
        k = -1
        for j in range(len(aperb)):
            if (aperb[j] == 3):
                k += 1
                modx[k] = aperx[j]
                mody[k] = apery[j]
        for i in tqdm(range(ntime)):
            modf = np.zeros(shape=(naper))
            k = -1
            guess = [
                mom_centr1[i], mom_centr2[i],
                np.nanmax(flux[i, :]), 1.0, 1.0, 0.0, 0.0
            ]
            for j in range(len(aperb)):
                if (aperb[j] == 3):
                    k += 1
                    modf[k] = flux[i, j]
                    args = (modx, mody, modf)
            try:
                ans = leastsq(kepfunc.PRFgauss2d,
                              guess,
                              args=args,
                              xtol=1.0e-8,
                              ftol=1.0e-4,
                              full_output=True)
                # reduced chi-square: fvec holds one residual per aperture pixel
                s_sq = (ans[2]['fvec']**2).sum() / (naper - len(guess))
                psf_centr1[i] = ans[0][0]
                psf_centr2[i] = ans[0][1]
            except:
                pass
            try:
                psf_centr1_err[i] = np.sqrt(np.diag(ans[1] * s_sq))[0]
            except:
                psf_centr1_err[i] = np.nan
            try:
                psf_centr2_err[i] = np.sqrt(np.diag(ans[1] * s_sq))[1]
            except:
                psf_centr2_err[i] = np.nan

    # construct output primary extension
    hdu0 = pyfits.PrimaryHDU()
    for i in range(len(cards0)):
        if cards0[i].keyword not in hdu0.header.keys():
            hdu0.header[cards0[i].keyword] = (cards0[i].value,
                                              cards0[i].comment)
        else:
            hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment
    kepkey.history(call, hdu0, outfile, logfile, verbose)
    outstr = pyfits.HDUList(hdu0)

    # construct output light curve extension
    col1 = pyfits.Column(name='TIME',
                         format='D',
                         unit='BJD - 2454833',
                         array=time)
    col2 = pyfits.Column(name='TIMECORR', format='E', unit='d', array=timecorr)
    col3 = pyfits.Column(name='CADENCENO', format='J', array=cadenceno)
    col4 = pyfits.Column(name='SAP_FLUX', format='E', array=sap_flux)
    col5 = pyfits.Column(name='SAP_FLUX_ERR', format='E', array=sap_flux_err)
    col6 = pyfits.Column(name='SAP_BKG', format='E', array=sap_bkg)
    col7 = pyfits.Column(name='SAP_BKG_ERR', format='E', array=sap_bkg_err)
    col8 = pyfits.Column(name='PDCSAP_FLUX', format='E', array=sap_flux)
    col9 = pyfits.Column(name='PDCSAP_FLUX_ERR',
                         format='E',
                         array=sap_flux_err)
    col10 = pyfits.Column(name='SAP_QUALITY', format='J', array=quality)
    col11 = pyfits.Column(name='PSF_CENTR1',
                          format='E',
                          unit='pixel',
                          array=psf_centr1)
    col12 = pyfits.Column(name='PSF_CENTR1_ERR',
                          format='E',
                          unit='pixel',
                          array=psf_centr1_err)
    col13 = pyfits.Column(name='PSF_CENTR2',
                          format='E',
                          unit='pixel',
                          array=psf_centr2)
    col14 = pyfits.Column(name='PSF_CENTR2_ERR',
                          format='E',
                          unit='pixel',
                          array=psf_centr2_err)
    col15 = pyfits.Column(name='MOM_CENTR1',
                          format='E',
                          unit='pixel',
                          array=mom_centr1)
    col16 = pyfits.Column(name='MOM_CENTR1_ERR',
                          format='E',
                          unit='pixel',
                          array=mom_centr1_err)
    col17 = pyfits.Column(name='MOM_CENTR2',
                          format='E',
                          unit='pixel',
                          array=mom_centr2)
    col18 = pyfits.Column(name='MOM_CENTR2_ERR',
                          format='E',
                          unit='pixel',
                          array=mom_centr2_err)
    col19 = pyfits.Column(name='POS_CORR1',
                          format='E',
                          unit='pixel',
                          array=pos_corr1)
    col20 = pyfits.Column(name='POS_CORR2',
                          format='E',
                          unit='pixel',
                          array=pos_corr2)
    col21 = pyfits.Column(name='RAW_FLUX', format='E', array=raw_flux)
    cols = pyfits.ColDefs([
        col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11,
        col12, col13, col14, col15, col16, col17, col18, col19, col20, col21
    ])
    hdu1 = pyfits.BinTableHDU.from_columns(cols)
    hdu1.header['TTYPE1'] = ('TIME', 'column title: data time stamps')
    hdu1.header['TFORM1'] = ('D', 'data type: float64')
    hdu1.header['TUNIT1'] = ('BJD - 2454833',
                             'column units: barycenter corrected JD')
    hdu1.header['TDISP1'] = ('D12.7', 'column display format')
    hdu1.header['TTYPE2'] = ('TIMECORR',
                             'column title: barycentric-timeslice correction')
    hdu1.header['TFORM2'] = ('E', 'data type: float32')
    hdu1.header['TUNIT2'] = ('d', 'column units: days')
    hdu1.header['TTYPE3'] = ('CADENCENO',
                             'column title: unique cadence number')
    hdu1.header['TFORM3'] = ('J', 'column format: signed integer32')
    hdu1.header['TTYPE4'] = ('SAP_FLUX',
                             'column title: aperture photometry flux')
    hdu1.header['TFORM4'] = ('E', 'column format: float32')
    hdu1.header['TUNIT4'] = ('e-/s', 'column units: electrons per second')
    hdu1.header['TTYPE5'] = ('SAP_FLUX_ERR',
                             'column title: aperture phot. flux error')
    hdu1.header['TFORM5'] = ('E', 'column format: float32')
    hdu1.header['TUNIT5'] = ('e-/s',
                             'column units: electrons per second (1-sigma)')
    hdu1.header['TTYPE6'] = ('SAP_BKG',
                             'column title: aperture phot. background flux')
    hdu1.header['TFORM6'] = ('E', 'column format: float32')
    hdu1.header['TUNIT6'] = ('e-/s', 'column units: electrons per second')
    hdu1.header['TTYPE7'] = ('SAP_BKG_ERR',
                             'column title: ap. phot. background flux error')
    hdu1.header['TFORM7'] = ('E', 'column format: float32')
    hdu1.header['TUNIT7'] = ('e-/s',
                             'column units: electrons per second (1-sigma)')
    hdu1.header['TTYPE8'] = ('PDCSAP_FLUX',
                             'column title: PDC photometry flux')
    hdu1.header['TFORM8'] = ('E', 'column format: float32')
    hdu1.header['TUNIT8'] = ('e-/s', 'column units: electrons per second')
    hdu1.header['TTYPE9'] = ('PDCSAP_FLUX_ERR', 'column title: PDC flux error')
    hdu1.header['TFORM9'] = ('E', 'column format: float32')
    hdu1.header['TUNIT9'] = ('e-/s',
                             'column units: electrons per second (1-sigma)')
    hdu1.header['TTYPE10'] = ('SAP_QUALITY',
                              'column title: aperture photometry quality flag')
    hdu1.header['TFORM10'] = ('J', 'column format: signed integer32')
    hdu1.header['TTYPE11'] = ('PSF_CENTR1',
                              'column title: PSF fitted column centroid')
    hdu1.header['TFORM11'] = ('E', 'column format: float32')
    hdu1.header['TUNIT11'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE12'] = ('PSF_CENTR1_ERR',
                              'column title: PSF fitted column error')
    hdu1.header['TFORM12'] = ('E', 'column format: float32')
    hdu1.header['TUNIT12'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE13'] = ('PSF_CENTR2',
                              'column title: PSF fitted row centroid')
    hdu1.header['TFORM13'] = ('E', 'column format: float32')
    hdu1.header['TUNIT13'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE14'] = ('PSF_CENTR2_ERR',
                              'column title: PSF fitted row error')
    hdu1.header['TFORM14'] = ('E', 'column format: float32')
    hdu1.header['TUNIT14'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE15'] = ('MOM_CENTR1',
                              'column title: moment-derived column centroid')
    hdu1.header['TFORM15'] = ('E', 'column format: float32')
    hdu1.header['TUNIT15'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE16'] = ('MOM_CENTR1_ERR',
                              'column title: moment-derived column error')
    hdu1.header['TFORM16'] = ('E', 'column format: float32')
    hdu1.header['TUNIT16'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE17'] = ('MOM_CENTR2',
                              'column title: moment-derived row centroid')
    hdu1.header['TFORM17'] = ('E', 'column format: float32')
    hdu1.header['TUNIT17'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE18'] = ('MOM_CENTR2_ERR',
                              'column title: moment-derived row error')
    hdu1.header['TFORM18'] = ('E', 'column format: float32')
    hdu1.header['TUNIT18'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE19'] = ('POS_CORR1',
                              'column title: col correction for vel. aberration')
    hdu1.header['TFORM19'] = ('E', 'column format: float32')
    hdu1.header['TUNIT19'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE20'] = ('POS_CORR2',
                              'column title: row correction for vel. aberration')
    hdu1.header['TFORM20'] = ('E', 'column format: float32')
    hdu1.header['TUNIT20'] = ('pixel', 'column units: pixel')
    hdu1.header['TTYPE21'] = ('RAW_FLUX',
                              'column title: raw aperture photometry flux')
    hdu1.header['TFORM21'] = ('E', 'column format: float32')
    hdu1.header['TUNIT21'] = ('e-/s', 'column units: electrons per second')
    hdu1.header['EXTNAME'] = ('LIGHTCURVE', 'name of extension')

    for i in range(len(cards1)):
        if (cards1[i].keyword not in hdu1.header.keys()
                and cards1[i].keyword[:4] not in [
                    'TTYP', 'TFOR', 'TUNI', 'TDIS', 'TDIM', 'WCAX', '1CTY',
                    '2CTY', '1CRP', '2CRP', '1CRV', '2CRV', '1CUN', '2CUN',
                    '1CDE', '2CDE', '1CDL', '2CDL', '11PC', '12PC', '21PC',
                    '22PC'
                ]):
            hdu1.header[cards1[i].keyword] = (cards1[i].value,
                                              cards1[i].comment)
    outstr.append(hdu1)

    # construct output mask bitmap extension
    hdu2 = pyfits.ImageHDU(maskmap)
    for i in range(len(cards2)):
        if cards2[i].keyword not in hdu2.header.keys():
            hdu2.header[cards2[i].keyword] = (cards2[i].value,
                                              cards2[i].comment)
        else:
            hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment
    outstr.append(hdu2)

    # write output file
    outstr.writeto(outfile, checksum=True)
    # close input structure
    instr.close()
    # end time
    kepmsg.clock('KEPEXTRACT finished at', logfile, verbose)
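
# A minimal sketch (not part of kepextract itself) of the aperture-sum step
# above: the per-cadence loops can be vectorized with a boolean pixel mask.
# All array names below are illustrative.
import numpy as np

def simple_aperture_photometry(flux, flux_err, aperb, sky):
    """Sum in-aperture flux per cadence; errors add in quadrature.

    flux, flux_err : (ntime, npix) arrays
    aperb          : (npix,) aperture bitmap; pixels flagged 3 are summed
    sky            : (ntime,) per-cadence background estimate
    """
    in_aper = aperb == 3
    sap_flux = np.nansum(flux[:, in_aper] - sky[:, None], axis=1)
    sap_flux_err = np.sqrt(np.nansum(flux_err[:, in_aper] ** 2, axis=1))
    return sap_flux, sap_flux_err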
Example #11
0
    def createOutput(self):
        """Create pyfits object for output file."""

        # Get header info from the input.
        ifd = fits.open(self.input[self.index_max_nelem], mode="copyonwrite")
        detector = ifd[0].header["detector"]

        primary_hdu = fits.PrimaryHDU(header=ifd[0].header)
        cosutil.updateFilename(primary_hdu.header, self.output)
        # Add new keywords with comma-separated values.
        list_keywords = [("MFPPOS", "fppos", self.keywords["fppos"]),
                         ("MFPOFSET", "fpoffset", self.keywords["fpoffset"]),
                         ("MCENWAVE", "cenwave", self.keywords["cenwave"])]
        for (new_kwd, old_kwd, list_value) in list_keywords:
            str_value = makeStringList(list_value)
            primary_hdu.header.set(new_kwd, str_value, after=old_kwd)  # xxx
            # xxx primary_hdu.header.insert(old_kwd, (new_kwd, str_value),
            # xxx                           after=True)
        del_these = ["segment", "wavecals", "fppos", "fpoffset"]
        for keyword in del_these:
            if keyword in primary_hdu.header:
                del (primary_hdu.header[keyword])
        if len(self.keywords["cenwave"]) > 1:
            del (primary_hdu.header["cenwave"])
        ofd = fits.HDUList(primary_hdu)

        rpt = str(self.output_nelem)  # used for defining columns

        # Define the columns explicitly, rather than using an input table
        # as a template and then modifying the lengths of arrays (see below),
        # because the modified columns kept reverting to the original length.
        col = []
        col.append(fits.Column(name="SEGMENT", format="4A"))
        col.append(
            fits.Column(name="EXPTIME", format="1D", disp="F8.3", unit="s"))
        col.append(fits.Column(name="NELEM", format="1J", disp="I6"))
        col.append(
            fits.Column(name="WAVELENGTH", format=rpt + "D", unit="angstrom"))
        col.append(
            fits.Column(name="FLUX",
                        format=rpt + "E",
                        unit="erg /s /cm**2 /angstrom"))
        col.append(
            fits.Column(name="ERROR",
                        format=rpt + "E",
                        unit="erg /s /cm**2 /angstrom"))
        col.append(fits.Column(name="GROSS", format=rpt + "E",
                               unit="count /s"))
        col.append(fits.Column(name="GCOUNTS", format=rpt + "E", unit="count"))
        col.append(fits.Column(name="NET", format=rpt + "E", unit="count /s"))
        col.append(
            fits.Column(name="BACKGROUND", format=rpt + "E", unit="count /s"))
        col.append(fits.Column(name="DQ", format=rpt + "I"))
        col.append(fits.Column(name="DQ_WGT", format=rpt + "E"))
        cd = fits.ColDefs(col)

        # Modify some of the output columns.
        #cd = ifd[1].columns             # this is a ColDefs object
        #col_names = cd.names
        #col_formats = cd.formats
        #ncols = len(col_names)
        # xxx x = ifd[1].data                 # xxx touch the data
        #for i in range(ncols):
        #    fmt = col_formats[i]
        #    if fmt[-1] in ["D", "E", "I", "J"] and fmt[0] in "123456789":
        #        x = ifd[1].data         # xxx touch the data
        #        newfmt = rpt + fmt[-1]
        #        cd.change_attrib(col_names[i], "format", newfmt)

        # Create output HDU for the table.
        newhdu = delExtraColumns(ifd[1])
        ifd[1].header = newhdu.header
        hdu = fits.BinTableHDU.from_columns(cd,
                                            header=ifd[1].header,
                                            nrows=self.nrows)

        hdu.header["exptime"] = self.keywords["exptime"]
        if detector == "FUV":
            hdu.header["exptimea"] = self.keywords["exptimea"]
            hdu.header["exptimeb"] = self.keywords["exptimeb"]

        hdu.header["expstart"] = self.keywords["expstart"]
        hdu.header["expend"] = self.keywords["expend"]
        hdu.header["expstrtj"] = self.keywords["expstrtj"]
        hdu.header["expendj"] = self.keywords["expendj"]
        hdu.header["plantime"] = self.keywords["plantime"]
        if self.keywords["globrate"] >= 0.:
            hdu.header["globrate"] = round(self.keywords["globrate"], 4)
        if self.keywords["globrt_a"] >= 0.:
            hdu.header["globrt_a"] = round(self.keywords["globrt_a"], 4)
        if self.keywords["globrt_b"] >= 0.:
            hdu.header["globrt_b"] = round(self.keywords["globrt_b"], 4)

        # Delete some keywords because they are specific to one exposure.
        delSomeKeywords(hdu.header)

        ofd.append(hdu)
        self.fpInitData(ofd)  # initialize data in output hdu

        ifd.close()

        self.ofd = ofd
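
# Sketch of the repeat-count column formats used above: a format such as
# "4D" stores a 4-element float64 vector in every row. Values below are
# illustrative, not taken from any COS dataset.
import numpy as np
from astropy.io import fits

nelem = 4
rpt = str(nelem)
demo_cols = fits.ColDefs([
    fits.Column(name="SEGMENT", format="4A", array=["FUVA", "FUVB"]),
    fits.Column(name="NELEM", format="1J", disp="I6", array=[nelem, nelem]),
    fits.Column(name="WAVELENGTH", format=rpt + "D", unit="angstrom",
                array=np.arange(2 * nelem, dtype="float64").reshape(2, nelem)),
])
demo_hdu = fits.BinTableHDU.from_columns(demo_cols, nrows=2)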
Example #12
0
def file_output(data_array, file_name):
    """Write a structured array straight to a FITS binary table."""
    hdu = fits.BinTableHDU.from_columns(fits.ColDefs(np.array(data_array)))
    hdu.writeto(file_name, overwrite=True)
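
# Hedged usage sketch for file_output: fits.ColDefs accepts a structured
# numpy array, so the input needs named fields. The file name is made up.
import numpy as np
rec = np.array([(1, 2.5), (2, 3.5)], dtype=[("ID", ">i8"), ("FLUX", ">f8")])
file_output(rec, "example_output.fits")
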
def write_catalogs_to_disk(outputfile, outputdataarray, literaturecontent=True, verbose=True):
    """

    """
    outputdataarray = np.sort(outputdataarray, order='id') # sorting array by ID
    outputdir = '/Users/kschmidt/work/publications/MUSE_UVemissionlines/catalog_releases/'
    outtxt    = outputdir+outputfile
    outfits   = outputdir+outputfile.replace('.txt','.fits')
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print(' - Initializing the output file: \n   '+outtxt)
    fout = open(outtxt,'w')
    if verbose: print('   Putting together header for ascii output')
    fout.write('# Value-added source catalog released with Schmidt et al. (2021) A&A XXXX:YYYY \n')
    fout.write('# Upper and lower limits are given as values with uncertainty of +99 or -99, respectively. \n')
    if literaturecontent:
        fout.write('# When using this catalog please cite Schmidt et al. (2021) A&A XXXX:YYYY '
                   'and the original papers the data were assembled from in the reference column (see Schmidt et al. 2021) \n')
    else:
        fout.write('# When using this catalog please cite Schmidt et al. (2021) A&A XXXX:YYYY \n')
    fout.write('# \n')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    colnames     = outputdataarray.dtype.names
    Ncols        = len(colnames)
    if verbose: print('   The output files will contain '+str(Ncols)+' columns ')
    fout.write('# The catalog contains the following '+str(Ncols)+' columns:\n')
    fout.write('# '+(' '.join([str("%20s" % colname) for colname in list(colnames)])).replace('reference','                    reference')+'  \n')
    if verbose: print('   The total number of objects in the output is '+str(len(outputdataarray['id'])))
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if verbose: print(' - Writing output data array to ascii file \n   '+outputfile)
    dtypetranslator = {'>i8':'%20i',  'float64':'%20.4f', '|S30':'%30s'}

    for oo, id in enumerate(outputdataarray['id']):
        outstr = ' '
        for cc, colval in enumerate(outputdataarray[oo].tolist()):
            strfmt = dtypetranslator[str(outputdataarray.dtype[cc])]
            if outputdataarray.dtype.names[cc] in  ['ra','dec']:
                strfmt = '%20.10f'

            if strfmt == '%30s':
                outstr = outstr +' '+ str(strfmt % colval.decode('UTF-8'))
            else:
                outstr = outstr +' '+ str(strfmt % colval)

        fout.write(outstr+' \n')
    fout.close()

    #-------------------------------------------------------------------------------------------------------------
    if verbose: print(' - Creating fits version of output: \n   '+outfits)
    if literaturecontent:
        fitsformat = ['K','D','D','20A','30A','D'] + ['D']*(Ncols-6)
    else:
        fitsformat      = ['K','D','D','D','10A','K','K'] + ['D']*(Ncols-15) + ['K', 'D']*4
        fitsformat[-19] = 'K' # The Kerutt ID
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if verbose: print('   Reading ascii file ')
    data    = np.genfromtxt(outtxt,names=colnames,skip_header=6,comments='#',dtype=outputdataarray.dtype)
    keys    = data.dtype.names
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if verbose: print('   Initialize and fill dictionary with data')
    datadic = {}
    for kk in keys:
        datadic[kk] = []
        try:
            lenarr = len(np.asarray(data[kk]))
            datadic[kk] = np.asarray(data[kk])
        except: # if only one row of data is to be written
            datadic[kk] = np.asarray([data[kk]])

    if verbose: print('   Found '+str(len(keys))+' columns to insert into fits binary table')

    if len(fitsformat) != len(keys):
        fitsformat = np.asarray([fitsformat]*len(keys))
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if verbose: print('   Writing to fits table')
    columndefs = []
    for kk, key in enumerate(keys):
        columndefs.append(afits.Column(name=key, format=fitsformat[kk],
                                       unit=uca.get_unit(key),
                                       array=datadic[key]))

    cols     = afits.ColDefs(columndefs)
    tbhdu    = afits.BinTableHDU.from_columns(cols)  # creating table header
    hdu      = afits.PrimaryHDU()                    # creating primary (minimal) header
    thdulist = afits.HDUList([hdu, tbhdu])           # combine primary and table header to hdulist
    thdulist.writeto(outfits,overwrite=True)      # write fits file
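
# Hedged sketch of the column assembly above. uca.get_unit is an external
# helper from the source package, so a plain dict stands in for it here;
# names and values are illustrative.
from astropy.io import fits as afits
unit_lookup = {"id": "", "ra": "deg", "dec": "deg"}
demo_cols = afits.ColDefs([
    afits.Column(name=key, format=fmt, unit=unit_lookup.get(key, ""),
                 array=vals)
    for key, fmt, vals in [("id", "K", [1, 2]),
                           ("ra", "D", [150.11, 150.12]),
                           ("dec", "D", [2.21, 2.22])]
])
afits.HDUList([afits.PrimaryHDU(),
               afits.BinTableHDU.from_columns(demo_cols)]).writeto(
                   "example_catalog.fits", overwrite=True)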
Example #14
0
aaa = orig_cols.del_col('CALIBFLUX')
aaa = orig_cols.del_col('CALIBFLUX_IVAR')

table_all = []
headers = ""
for fiber, mjd in zip(orig_table['FIBERID'], orig_table['MJD']):
    fitFile = join(
        os.environ['EBOSSDR14_DIR'], dir, "stellarpop", plate, "spFly-" +
        plate.zfill(4) + "-" + str(mjd) + "-" + str(fiber).zfill(4) + suffix)
    # print fitFile
    if os.path.isfile(fitFile):
        table_entry, headers = get_table_entry_full(
            hduSPM=fits.open(fitFile)[1])
        table_all.append(table_entry)
    else:
        table_all.append(n.ones(66) * dV)

newDat = n.transpose(table_all)

all_cols = []
for data_array, head in zip(newDat, headers.split()):
    all_cols.append(fits.Column(name=head, format='D', array=data_array))

new_cols = fits.ColDefs(all_cols)
hdu = fits.BinTableHDU.from_columns(orig_cols + new_cols)
if os.path.isfile(plate_catalog):
    os.remove(plate_catalog)

hdu.writeto(plate_catalog)
Example #15
0
    aexps.append(aaa)

out_name = os.path.join(os.environ["MD40"], 'output_MD_4.0Gpc.fits')

redshift = 1. / n.array(aexps) - 1.
dCom = cosmoMD.comoving_distance(redshift)
ids = n.argsort(redshift)
col0 = fits.Column(name='snap_name', format='A4', array=n.array(names)[ids])
col1 = fits.Column(name='N_columns', format='I', array=n.array(colN)[ids])
col2 = fits.Column(name='aexp', format='D', array=n.array(aexps)[ids])
col3 = fits.Column(name='redshift', format='D', array=redshift[ids])
col4 = fits.Column(name='comoving_distance', format='D', array=dCom.value[ids])
array = 10**9 * cosmoMD.age(redshift).value
col5 = fits.Column(name='age_yr', format='D', array=array[ids])
array = cosmoMD.arcsec_per_kpc_comoving(redshift).value * 3.6
col6 = fits.Column(name='deg_per_Mpc_comoving', format='D', array=array[ids])

#define the table hdu
hdu_cols = fits.ColDefs([col0, col1, col2, col3, col4, col5,
                         col6])  #, col7, col8, col9, col10])
tb_hdu = fits.BinTableHDU.from_columns(hdu_cols)
#define the header
prihdr = fits.Header()
prihdr['sim'] = 'HMD'
prihdr['author'] = 'JC'
prihdu = fits.PrimaryHDU(header=prihdr)
#writes the file
thdulist = fits.HDUList([prihdu, tb_hdu])
#os.system("rm "+out_name)
thdulist.writeto(out_name)
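
# Sketch of the astropy.cosmology calls used above. cosmoMD is defined
# elsewhere in the source file; Planck15 stands in for it here.
import numpy as np
from astropy.cosmology import Planck15 as cosmo
z = np.array([0.0, 0.5, 1.0])
d_com = cosmo.comoving_distance(z)   # astropy Quantity in Mpc
age_yr = 10**9 * cosmo.age(z).value  # age of the universe, Gyr -> yr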
Example #16
0
    def _make_target_extension(self):
        """Create the 'TARGETTABLES' extension (i.e. extension #1)."""
        # Turn the data arrays into fits columns and initialize the HDU
        coldim = '({},{})'.format(self.n_cols, self.n_rows)
        eformat = '{}E'.format(self.n_rows * self.n_cols)
        jformat = '{}J'.format(self.n_rows * self.n_cols)
        cols = []
        cols.append(
            fits.Column(name='TIME',
                        format='D',
                        unit='BJD - 2454833',
                        array=self.time))
        cols.append(
            fits.Column(name='TIMECORR',
                        format='E',
                        unit='D',
                        array=self.timecorr))
        cols.append(
            fits.Column(name='CADENCENO', format='J', array=self.cadenceno))
        cols.append(
            fits.Column(name='RAW_CNTS',
                        format=jformat,
                        unit='count',
                        dim=coldim,
                        array=self.raw_cnts))
        cols.append(
            fits.Column(name='FLUX',
                        format=eformat,
                        unit='e-/s',
                        dim=coldim,
                        array=self.flux))
        cols.append(
            fits.Column(name='FLUX_ERR',
                        format=eformat,
                        unit='e-/s',
                        dim=coldim,
                        array=self.flux_err))
        cols.append(
            fits.Column(name='FLUX_BKG',
                        format=eformat,
                        unit='e-/s',
                        dim=coldim,
                        array=self.flux_bkg))
        cols.append(
            fits.Column(name='FLUX_BKG_ERR',
                        format=eformat,
                        unit='e-/s',
                        dim=coldim,
                        array=self.flux_bkg_err))
        cols.append(
            fits.Column(name='COSMIC_RAYS',
                        format=eformat,
                        unit='e-/s',
                        dim=coldim,
                        array=self.cosmic_rays))
        cols.append(fits.Column(name='QUALITY', format='J',
                                array=self.quality))
        cols.append(
            fits.Column(name='POS_CORR1',
                        format='E',
                        unit='pixels',
                        array=self.pos_corr1))
        cols.append(
            fits.Column(name='POS_CORR2',
                        format='E',
                        unit='pixels',
                        array=self.pos_corr2))
        coldefs = fits.ColDefs(cols)
        hdu = fits.BinTableHDU.from_columns(coldefs)

        # Set the header with defaults
        template = self._header_template(1)
        for kw in template:
            if kw not in [
                    'XTENSION', 'NAXIS1', 'NAXIS2', 'CHECKSUM', 'BITPIX'
            ]:
                try:
                    hdu.header[kw] = (self.keywords[kw],
                                      self.keywords.comments[kw])
                except KeyError:
                    hdu.header[kw] = (template[kw], template.comments[kw])

        # Override the defaults where necessary
        hdu.header['EXTNAME'] = 'TARGETTABLES'
        hdu.header['OBJECT'] = self.target_id
        hdu.header['KEPLERID'] = self.target_id
        for n in [5, 6, 7, 8, 9]:
            hdu.header["TFORM{}".format(n)] = eformat
            hdu.header["TDIM{}".format(n)] = coldim
        hdu.header['TFORM4'] = jformat
        hdu.header['TDIM4'] = coldim

        return hdu
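
# Sketch of the image-per-row columns built above: a repeat-count format
# plus a TDIM string stores a 2-D stamp in every row. Shapes below are
# illustrative (3x2-pixel stamps over 5 cadences).
import numpy as np
from astropy.io import fits

n_rows, n_cols, n_cad = 2, 3, 5
stamp_flux = np.random.rand(n_cad, n_rows, n_cols).astype("float32")
flux_col = fits.Column(name="FLUX",
                       format="{}E".format(n_rows * n_cols),
                       dim="({},{})".format(n_cols, n_rows),
                       unit="e-/s",
                       array=stamp_flux)
demo_hdu = fits.BinTableHDU.from_columns([flux_col])
assert demo_hdu.data["FLUX"][0].shape == (n_rows, n_cols)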
Example #17
0
def toPixels(args):
    """
    Convert world coordinates to pixel coordinates.
    Assumes the HSC pixel size.
    """

    verbose = True

    """ options
    """
    pixelScale = 0.17
    fileInName = args.input.split(",")

    """ read input file
    """
    cols, _ = getCols(fileInName[0], ['user_ra', 'user_dec', 'ra', 'dec'], dictionary=True)
    # coord = SkyCoord(cols['ra'], cols['dec'], unit="deg")
    N = len(cols['ra'])

    """ create wcs object
    """
    w = createZeroWcs(pixelScale, 0.0, 0.0)

    x = np.zeros(N)
    y = np.zeros(N)

    w.wcs.crval = [cols['user_ra'][0], cols['user_dec'][0]]
    count = 0

    """ loop over all objects
    but do the conversion in blocks sharing the
    same nearby star
    """

    # count = 0
    for i in range(N):
        w.wcs.crval = [cols['user_ra'][i], cols['user_dec'][i]]
        x[i], y[i] = w.wcs_world2pix(cols['ra'][i], cols['dec'][i], 1)

        # try:
        #    np.testing.assert_array_almost_equal(w.wcs.crval, [cols['user_ra'][i], cols['user_dec'][i]], decimal=4)
        # except:
        #    x[i-count:i+1], y[i-count:i+1] = w.wcs_world2pix(cols['ra'][i-count:i+1], cols['dec'][i-count:i+1], 1)
        #    w.wcs.crval = [cols['user_ra'][i], cols['user_dec'][i]]
        #    count = 0
        # count += 1

        #try:
        #    np.testing.assert_array_almost_equal(w.wcs.crval, [cols['user_ra'][i], cols['user_dec'][i]], decimal=4)
        #except:
        #    x[i-count:i+1], y[i-count:i+1] = wcs.utils.skycoord_to_pixel(coord[i-count:i+1], w, origin=1, mode='wcs')
        #    w.wcs.crval = [cols['user_ra'][i], cols['user_dec'][i]]
        #    count = 0

        #count += 1

        if verbose:
            if (i+1)%10000 == 0:
                sys.stderr.write("\r" + "Processed {0:d}/{1:d} objects ({2:.2f}%)".format(i+1, N, 100.0*float(i+1)/float(N)))
                sys.stderr.flush()

    # count -= 1
    # x[i-count:i+1], y[i-count:i+1] = w.wcs_world2pix(cols['ra'][i-count:N], cols['dec'][i-count:N], 1)

    #count -= 1
    #x[i-count:N], y[i-count:N] = wcs.utils.skycoord_to_pixel(coord[i-count:N], w, origin=1)

    if verbose: sys.stderr.write("\r" + "Processed {0:d}/{1:d} objects ({2:.2f}%)\n".format(i+1, N, 100.0))

    outCols = [fits.Column(name='x', format='E', array=x), fits.Column(name='y', format='E', array=y)]

    fileIn = fits.open(fileInName[0])
    tbhdu  = fits.HDUList([fileIn[0], fits.BinTableHDU.from_columns(fileIn[1].columns + fits.ColDefs(outCols))])
    tbhdu.writeto(args.output, overwrite=True)

    fileIn.close()

    return
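
# Sketch of the WCS conversion used above. createZeroWcs comes from
# elsewhere in the source; an equivalent minimal TAN projection is built
# here by hand, with illustrative coordinates.
import numpy as np
from astropy import wcs

def make_tan_wcs(pixel_scale_arcsec, ra0, dec0):
    w = wcs.WCS(naxis=2)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    w.wcs.crval = [ra0, dec0]
    w.wcs.crpix = [0.0, 0.0]
    w.wcs.cdelt = np.array([-pixel_scale_arcsec, pixel_scale_arcsec]) / 3600.0
    return w

w = make_tan_wcs(0.17, 150.0, 2.0)
x, y = w.wcs_world2pix(150.001, 2.001, 1)  # world (deg) -> pixel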
Example #18
0
lsm_files = np.array([
    survey + '_LSM_' + str(sub_survey_name) + '.fits'
    for sub_survey_name in sub_survey_names
])
ssm_files = np.array([
    survey + '_SSM_' + str(sub_survey_name) + '.fits'
    for sub_survey_name in sub_survey_names
])
tmax_files = np.array([
    survey + '_TMAX_' + str(sub_survey_name) + '.fits'
    for sub_survey_name in sub_survey_names
])
areq_files = np.array([
    survey + '_AREQ_' + str(sub_survey_name) + '.fits'
    for sub_survey_name in sub_survey_names
])

out_file_PARA = os.path.join(working_dir, survey + '_SUBSURVEY_PARAMS.fits')

cols = fits.ColDefs([
    fits.Column("SUBSURVEY", unit='', format="20A", array=sub_survey_names),
    fits.Column("LSM_FILENAME", unit='', format="200A", array=lsm_files),
    fits.Column("SSM_FILENAME", unit='', format="200A", array=ssm_files),
    fits.Column("TMAX_FILENAME", unit='', format="200A", array=tmax_files),
    fits.Column("AREQ_FILENAME", unit='', format="200A", array=areq_files),
])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.header['author'] = 'JC'
print(out_file_PARA)
if os.path.isfile(out_file_PARA):
    os.system("rm " + out_file_PARA)
tbhdu.writeto(out_file_PARA)
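
# Hedged usage sketch: reading the parameter table back to check that the
# filename columns round-trip as fixed-width strings.
from astropy.io import fits
with fits.open(out_file_PARA) as hdul:
    params = hdul[1].data
    print(params["SUBSURVEY"][0], params["LSM_FILENAME"][0])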
Example #19
0
def kepfold(infile,
            outfile,
            period,
            bjd0,
            bindata=False,
            binmethod='median',
            threshold=1.0,
            niter=5,
            nbins=1000,
            rejqual=False,
            plottype='sap',
            overwrite=False,
            verbose=False,
            logfile="kepfold.log"):
    """
    kepfold: Phase-fold light curve data on linear ephemeris.

    kepfold calculates the phase of all time-tagged data points relative to a
    user-supplied linear ephemeris. The relation is:

    .. math::

        PHASE_i = \frac{TIME_i - bjd0}{period} -
                  \left\lfloor \frac{TIME_i - bjd0}{period} \right\rfloor

    :math:`TIME` is the column within the FITS light curve file containing
    barycenter-corrected time stamps. :math:`bjd0` is a user-supplied BJD for
    zero phase. period is a user-supplied period in units of days. PHASE is the
    calculated phase for each time stamp; these results are written to a new
    float column in the LIGHT CURVE extension of the input file before being
    exported as a new file with name defined by the user. Optionally, kepfold
    will plot the data folded on the ephemeris and store it within a new FITS
    extension of the output file called FOLDED. Both the SAP and PDC fluxes are
    binned and stored in the new extension. There are a number of binning
    algorithms, mean, median and sigma clipping. The user has options to adapt
    bin size, binning method and the rejection of outliers.

    Parameters
    ----------
    infile : str
        The name of a MAST standard format FITS file containing a Kepler light
        curve within the first data extension.
    outfile : str
        The name of the output FITS file with a new extension containing a
        phased light curve.
    period : float
        Period over which to fold the light curve, in units of days.
    bjd0 : float
        Time of zero phase for the folded data, in units of BJD.
    bindata : bool
        Collect the data into discrete bins during the fold?
    binmethod : str
        Binning method.

        * `mean` calculates the mean of all data points contained within a bin.
        * `median` calculates the median of all data points within a bin.
        * `sigclip` calculates a mean iteratively. Each iteration rejects data
          lying further than a threshold number of standard deviations from the
          mean before recalculating the result.

    threshold : float
        The sigma clipping threshold in units of the standard deviation about
        the calculated mean within a phase bin. A typical outlier
        lies > 3.0 :math:`\sigma` from the mean.
    niter : int
        The maximum number of iterations over which to reject outliers before
        accepting the sigclip result.
    nbins : int
        The number of phase bins to calculate.
    rejqual : bool
        If `True`, timestamps with quality issues recorded as a finite quality
        flag in the input file will be thrown away before folding the data.
    plottype : str
        The type of data to plot. The choices refer to the types of photometry
        stored in the input file.

        * ``sap`` is Simple Aperture Photometry, stored in the column,
          SAP_FLUX. SAP data is generated by the Kepler pipeline but it can also
          be generated from a target pixel file using the kepextract tool.
        * ``pdc`` is Pre-search Data Conditioning photometry, stored in the
          column PDCSAP_FLUX. PDC data is a Kepler pipeline product.
        * ``cbv`` Cotrending Basis Vector is SAP photometry corrected manually
          by the user using the tool ``kepcotrend``. CBV data is stored in the
          column CBVSAP_FLUX.
        * ``det`` data has been detrended using piecewise polynomials with the
          kepflatten tool. DET data is stored in the column DETSAP_FLUX.

    overwrite : bool
        Overwrite the output file?
    verbose : bool
        Print informative messages and warnings to the shell and logfile?
    logfile : str
        Name of the logfile containing error and warning messages.

    Examples
    --------
    .. code-block:: bash

        $ kepfold kplr010544976-2009201121230_slc.fits kepfold.fits
          0.350471 2455002.825 --bindata --binmethod median --threshold 3.0
          --niter 1000 --plottype sap --verbose

    .. image:: ../_static/images/api/kepfold.png
        :align: center
    """

    # log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = ('KEPFOLD -- ' + ' infile={}'.format(infile) +
            ' outfile={}'.format(outfile) + ' period={}'.format(period) +
            ' bjd0={}'.format(bjd0) + ' bindata={}'.format(bindata) +
            ' binmethod={}'.format(binmethod) +
            ' threshold={}'.format(threshold) + ' niter={}'.format(niter) +
            ' nbins={}'.format(nbins) + ' rejqual={}'.format(rejqual) +
            ' plottype={}'.format(plottype) +
            ' overwrite={}'.format(overwrite) + ' verbose={}'.format(verbose) +
            ' logfile={}'.format(logfile))

    kepmsg.log(logfile, call + '\n', verbose)

    # start time
    kepmsg.clock('KEPFOLD started at', logfile, verbose)

    # overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        errmsg = (
            'ERROR -- KEPFOLD: {} exists. Use --overwrite'.format(outfile))
        kepmsg.err(logfile, errmsg, verbose)

    # open input file
    instr = pyfits.open(infile, 'readonly')
    tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
                                                    verbose)
    try:
        work = instr[0].header['FILEVER']
        cadenom = 1.0
    except:
        cadenom = cadence

    # fudge non-compliant FITS keywords with no values
    instr = kepkey.emptykeys(instr, infile, logfile, verbose)

    # input data
    table = instr[1].data
    incards = instr[1].header.cards
    try:
        sap = instr[1].data.field('SAP_FLUX')
    except:
        try:
            sap = instr[1].data.field('ap_raw_flux')
        except:
            sap = np.zeros(len(table.field(0)))
    try:
        saperr = instr[1].data.field('SAP_FLUX_ERR')
    except:
        try:
            saperr = instr[1].data.field('ap_raw_err')
        except:
            saperr = np.zeros(len(table.field(0)))
    try:
        pdc = instr[1].data.field('PDCSAP_FLUX')
    except:
        try:
            pdc = instr[1].data.field('ap_corr_flux')
        except:
            pdc = np.zeros(len(table.field(0)))
    try:
        pdcerr = instr[1].data.field('PDCSAP_FLUX_ERR')
    except:
        try:
            pdcerr = instr[1].data.field('ap_corr_err')
        except:
            pdcerr = np.zeros(len(table.field(0)))
    try:
        cbv = instr[1].data.field('CBVSAP_FLUX')
    except:
        cbv = np.zeros(len(table.field(0)))
        if 'cbv' in plottype:
            errmsg = ("ERROR -- KEPFOLD: CBVSAP_FLUX column is not populated."
                      " Use kepcotrend")
            kepmsg.err(logfile, txt, verbose)
    try:
        det = instr[1].data.field('DETSAP_FLUX')
    except:
        det = np.zeros(len(table.field(0)))
        if 'det' in plottype:
            txt = ("ERROR -- KEPFOLD: DETSAP_FLUX column is not populated."
                   "Use kepflatten")
            kepmsg.err(logfile, txt, verbose)
    try:
        deterr = instr[1].data.field('DETSAP_FLUX_ERR')
    except:
        deterr = np.zeros(len(table.field(0)))
        if 'det' in plottype:
            txt = ("ERROR -- KEPFOLD: DETSAP_FLUX_ERR column is not populated."
                   " Use kepflatten.")
            kepmsg.err(logfile, txt, verbose)
    try:
        quality = instr[1].data.field('SAP_QUALITY')
    except:
        quality = np.zeros(len(table.field(0)))
        if rejqual:
            txt = 'WARNING -- KEPFOLD: Cannot find a QUALITY data column'
            kepmsg.warn(logfile, txt)
    barytime = kepio.readtimecol(infile, table, logfile, verbose)
    barytime1 = copy(barytime)

    # filter out NaNs and quality > 0
    work1, work2, work3, work4 = [], [], [], []
    work5, work6, work8, work9 = [], [], [], []
    if 'sap' in plottype:
        datacol = copy(sap)
        errcol = copy(saperr)
    if 'pdc' in plottype:
        datacol = copy(pdc)
        errcol = copy(pdcerr)
    if 'cbv' in plottype:
        datacol = copy(cbv)
        errcol = copy(saperr)
    if 'det' in plottype:
        datacol = copy(det)
        errcol = copy(deterr)
    for i in range(len(barytime)):
        if (np.isfinite(barytime[i]) and np.isfinite(datacol[i])
                and datacol[i] != 0.0 and np.isfinite(errcol[i])
                and errcol[i] > 0.0):
            if rejqual and quality[i] == 0:
                work1.append(barytime[i])
                work2.append(sap[i])
                work3.append(saperr[i])
                work4.append(pdc[i])
                work5.append(pdcerr[i])
                work6.append(cbv[i])
                work8.append(det[i])
                work9.append(deterr[i])
            elif not rejqual:
                work1.append(barytime[i])
                work2.append(sap[i])
                work3.append(saperr[i])
                work4.append(pdc[i])
                work5.append(pdcerr[i])
                work6.append(cbv[i])
                work8.append(det[i])
                work9.append(deterr[i])
    barytime = np.array(work1, dtype='float64')
    sap = np.array(work2, dtype='float32') / cadenom
    saperr = np.array(work3, dtype='float32') / cadenom
    pdc = np.array(work4, dtype='float32') / cadenom
    pdcerr = np.array(work5, dtype='float32') / cadenom
    cbv = np.array(work6, dtype='float32') / cadenom
    det = np.array(work8, dtype='float32') / cadenom
    deterr = np.array(work9, dtype='float32') / cadenom

    # calculate phase
    if bjd0 < bjdref:
        bjd0 += bjdref
    date1 = (barytime1 + bjdref - bjd0)
    phase1 = (date1 / period) - np.floor(date1 / period)
    date2 = (barytime + bjdref - bjd0)
    phase2 = (date2 / period) - np.floor(date2 / period)
    phase2 = np.array(phase2, 'float32')
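
    # e.g. with period=2.0 d and bjd0 at the reference epoch, a timestamp
    # 5.5 d after bjd0 gives date/period = 2.75, so the phase is 0.75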

    # sort phases
    ptuple = []
    phase3 = []
    sap3, saperr3 = [], []
    pdc3, pdcerr3 = [], []
    cbv3, cbverr3 = [], []
    det3, deterr3 = [], []
    for i in range(len(phase2)):
        ptuple.append([
            phase2[i], sap[i], saperr[i], pdc[i], pdcerr[i], cbv[i], saperr[i],
            det[i], deterr[i]
        ])
    phsort = sorted(ptuple, key=lambda ph: ph[0])
    for i in range(len(phsort)):
        phase3.append(phsort[i][0])
        sap3.append(phsort[i][1])
        saperr3.append(phsort[i][2])
        pdc3.append(phsort[i][3])
        pdcerr3.append(phsort[i][4])
        cbv3.append(phsort[i][5])
        cbverr3.append(phsort[i][6])
        det3.append(phsort[i][7])
        deterr3.append(phsort[i][8])
    phase3 = np.array(phase3, 'float32')
    sap3 = np.array(sap3, 'float32')
    saperr3 = np.array(saperr3, 'float32')
    pdc3 = np.array(pdc3, 'float32')
    pdcerr3 = np.array(pdcerr3, 'float32')
    cbv3 = np.array(cbv3, 'float32')
    cbverr3 = np.array(cbverr3, 'float32')
    det3 = np.array(det3, 'float32')
    deterr3 = np.array(deterr3, 'float32')

    # bin phases
    if bindata:
        work1 = np.array([sap3[0]], 'float32')
        work2 = np.array([saperr3[0]], 'float32')
        work3 = np.array([pdc3[0]], 'float32')
        work4 = np.array([pdcerr3[0]], 'float32')
        work5 = np.array([cbv3[0]], 'float32')
        work6 = np.array([cbverr3[0]], 'float32')
        work7 = np.array([det3[0]], 'float32')
        work8 = np.array([deterr3[0]], 'float32')
        phase4 = np.array([], 'float32')
        sap4 = np.array([], 'float32')
        saperr4 = np.array([], 'float32')
        pdc4 = np.array([], 'float32')
        pdcerr4 = np.array([], 'float32')
        cbv4 = np.array([], 'float32')
        cbverr4 = np.array([], 'float32')
        det4 = np.array([], 'float32')
        deterr4 = np.array([], 'float32')
        dt = 1.0 / nbins
        nb = 0.0
        rng = np.append(phase3, phase3[0] + 1.0)
        for i in range(len(rng)):
            if rng[i] < nb * dt or rng[i] >= (nb + 1.0) * dt:
                if len(work1) > 0:
                    phase4 = np.append(phase4, (nb + 0.5) * dt)
                    if binmethod == 'mean':
                        sap4 = np.append(sap4, np.nanmean(work1))
                        saperr4 = np.append(saperr4, kepstat.mean_err(work2))
                        pdc4 = np.append(pdc4, np.nanmean(work3))
                        pdcerr4 = np.append(pdcerr4, kepstat.mean_err(work4))
                        cbv4 = np.append(cbv4, np.nanmean(work5))
                        cbverr4 = np.append(cbverr4, kepstat.mean_err(work6))
                        det4 = np.append(det4, np.nanmean(work7))
                        deterr4 = np.append(deterr4, kepstat.mean_err(work8))
                    elif binmethod == 'median':
                        sap4 = np.append(sap4, np.nanmedian(work1))
                        saperr4 = np.append(saperr4, kepstat.mean_err(work2))
                        pdc4 = np.append(pdc4, np.nanmedian(work3))
                        pdcerr4 = np.append(pdcerr4, kepstat.mean_err(work4))
                        cbv4 = np.append(cbv4, np.nanmedian(work5))
                        cbverr4 = np.append(cbverr4, kepstat.mean_err(work6))
                        det4 = np.append(det4, np.nanmedian(work7))
                        deterr4 = np.append(deterr4, kepstat.mean_err(work8))
                    else:
                        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, \
                        plotx, ploty = kepfit.lsqclip(kepfunc.poly0,
                                [np.nanmean(work1)],
                                np.arange(0.0, float(len(work1)), 1.0), work1,
                                work2, threshold, threshold, niter, logfile,
                                False)
                        sap4 = np.append(sap4, coeffs[0])
                        saperr4 = np.append(saperr4, kepstat.mean_err(work2))
                        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, \
                        plotx, ploty = kepfit.lsqclip(kepfunc.poly0,
                                [np.nanmean(work3)],
                                np.arange(0.0, float(len(work3)), 1.0), work3,
                                work4, threshold, threshold, niter, logfile,
                                False)
                        pdc4 = np.append(pdc4, coeffs[0])
                        pdcerr4 = np.append(pdcerr4, kepstat.mean_err(work4))
                        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, \
                        plotx, ploty = kepfit.lsqclip(kepfunc.poly0,
                                [np.nanmean(work5)],
                                np.arange(0.0, float(len(work5)), 1.0),
                                work5, work6, threshold, threshold, niter,
                                logfile, False)
                        cbv4 = np.append(cbv4, coeffs[0])
                        cbverr4 = np.append(cbverr4, kepstat.mean_err(work6))
                        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, \
                        plotx, ploty = kepfit.lsqclip(kepfunc.poly0,
                                [np.nanmean(work7)],
                                np.arange(0.0, float(len(work7)), 1.0),
                                work7, work8, threshold, threshold, niter,
                                logfile, False)
                        det4 = np.append(det4, coeffs[0])
                        deterr4 = np.append(deterr4, kepstat.mean_err(work8))
                work1 = np.array([], 'float32')
                work2 = np.array([], 'float32')
                work3 = np.array([], 'float32')
                work4 = np.array([], 'float32')
                work5 = np.array([], 'float32')
                work6 = np.array([], 'float32')
                work7 = np.array([], 'float32')
                work8 = np.array([], 'float32')
                nb += 1.0
            else:
                work1 = np.append(work1, sap3[i])
                work2 = np.append(work2, saperr3[i])
                work3 = np.append(work3, pdc3[i])
                work4 = np.append(work4, pdcerr3[i])
                work5 = np.append(work5, cbv3[i])
                work6 = np.append(work6, cbverr3[i])
                work7 = np.append(work7, det3[i])
                work8 = np.append(work8, deterr3[i])

    # update HDU1 for output file
    cols = (instr[1].columns + pyfits.ColDefs(
        [pyfits.Column(name='PHASE', format='E', array=phase1)]))
    instr[1] = pyfits.BinTableHDU.from_columns(cols)
    instr[1].header.cards[
        'TTYPE' + str(len(instr[1].columns))].comment = 'column title: phase'
    instr[1].header.cards[
        'TFORM' + str(len(instr[1].columns))].comment = 'data type: float32'
    for i in range(len(incards)):
        if incards[i].keyword not in instr[1].header.keys():
            instr[1].header[incards[i].keyword] = (incards[i].value,
                                                   incards[i].comment)
        else:
            instr[1].header.cards[
                incards[i].keyword].comment = incards[i].comment
    instr[1].header['PERIOD'] = (period, 'period defining the phase [d]')
    instr[1].header['BJD0'] = (bjd0, 'time of phase zero [BJD]')

    # write new phased data extension for output file
    if bindata:
        col1 = pyfits.Column(name='PHASE', format='E', array=phase4)
        col2 = pyfits.Column(name='SAP_FLUX',
                             format='E',
                             unit='e/s',
                             array=sap4 / cadenom)
        col3 = pyfits.Column(name='SAP_FLUX_ERR',
                             format='E',
                             unit='e/s',
                             array=saperr4 / cadenom)
        col4 = pyfits.Column(name='PDC_FLUX',
                             format='E',
                             unit='e/s',
                             array=pdc4 / cadenom)
        col5 = pyfits.Column(name='PDC_FLUX_ERR',
                             format='E',
                             unit='e/s',
                             array=pdcerr4 / cadenom)
        col6 = pyfits.Column(name='CBV_FLUX',
                             format='E',
                             unit='e/s',
                             array=cbv4 / cadenom)
        col7 = pyfits.Column(name='DET_FLUX', format='E', array=det4 / cadenom)
        col8 = pyfits.Column(name='DET_FLUX_ERR',
                             format='E',
                             array=deterr4 / cadenom)
        cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8])
        instr.append(pyfits.BinTableHDU.from_columns(cols))
        instr[-1].header.cards['TTYPE1'].comment = 'column title: phase'
        instr[-1].header.cards[
            'TTYPE2'].comment = 'column title: simple aperture photometry'
        instr[-1].header.cards[
            'TTYPE3'].comment = 'column title: SAP 1-sigma error'
        instr[-1].header.cards[
            'TTYPE4'].comment = 'column title: pipeline conditioned photometry'
        instr[-1].header.cards[
            'TTYPE5'].comment = 'column title: PDC 1-sigma error'
        instr[-1].header.cards[
            'TTYPE6'].comment = 'column title: cotrended basis vector photometry'
        instr[-1].header.cards[
            'TTYPE7'].comment = 'column title: Detrended aperture photometry'
        instr[-1].header.cards[
            'TTYPE8'].comment = 'column title: DET 1-sigma error'
        instr[-1].header.cards['TFORM1'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM2'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM3'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM4'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM5'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM6'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM7'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM8'].comment = 'column type: float32'
        instr[-1].header.cards[
            'TUNIT2'].comment = 'column units: electrons per second'
        instr[-1].header.cards[
            'TUNIT3'].comment = 'column units: electrons per second'
        instr[-1].header.cards[
            'TUNIT4'].comment = 'column units: electrons per second'
        instr[-1].header.cards[
            'TUNIT5'].comment = 'column units: electrons per second'
        instr[-1].header.cards[
            'TUNIT6'].comment = 'column units: electrons per second'
        instr[-1].header['EXTNAME'] = ('FOLDED', 'extension name')
        instr[-1].header['PERIOD'] = (period, 'period defining the phase [d]')
        instr[-1].header['BJD0'] = (bjd0, 'time of phase zero [BJD]')
        instr[-1].header['BINMETHD'] = (binmethod, 'phase binning method')

        if binmethod == 'sigclip':
            instr[-1].header['THRSHOLD'] = (threshold,
                                            'sigma-clipping threshold [sigma]')
            instr[-1].header['NITER'] = (
                niter, 'max number of sigma-clipping iterations')

    # history keyword in output file
    kepkey.history(call, instr[0], outfile, logfile, verbose)
    instr.writeto(outfile)

    # clean up x-axis unit
    ptime1, ptime2 = np.array([], 'float32'), np.array([], 'float32')
    pout1, pout2 = np.array([], 'float32'), np.array([], 'float32')
    if bindata:
        work = sap4
        if plottype == 'pdc':
            work = pdc4
        if plottype == 'cbv':
            work = cbv4
        if plottype == 'det':
            work = det4
        for i in range(len(phase4)):
            if phase4[i] > 0.5:
                ptime2 = np.append(ptime2, phase4[i] - 1.0)
                pout2 = np.append(pout2, work[i])
        ptime2 = np.append(ptime2, phase4)
        pout2 = np.append(pout2, work)
        for i in range(len(phase4)):
            if phase4[i] <= 0.5:
                ptime2 = np.append(ptime2, phase4[i] + 1.0)
                pout2 = np.append(pout2, work[i])
    work = sap3
    if plottype == 'pdc':
        work = pdc3
    if plottype == 'cbv':
        work = cbv3
    if plottype == 'det':
        work = det3
    for i in range(len(phase3)):
        if phase3[i] > 0.5:
            ptime1 = np.append(ptime1, phase3[i] - 1.0)
            pout1 = np.append(pout1, work[i])
    ptime1 = np.append(ptime1, phase3)
    pout1 = np.append(pout1, work)
    for i in tqdm(range(len(phase3))):
        if phase3[i] <= 0.5:
            ptime1 = np.append(ptime1, phase3[i] + 1.0)
            pout1 = np.append(pout1, work[i])
    xlab = r'Orbital Phase ($\phi$)'

    # clean up y-axis units
    nrm = len(str(int(pout1[np.isfinite(pout1)].max()))) - 1
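    # e.g. a maximum flux of 5.3e4 e-/s gives nrm = 4, so fluxes are plotted
    # in units of 10^4 e-/s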
    pout1 = pout1 / 10**nrm
    pout2 = pout2 / 10**nrm
    if nrm == 0:
        ylab = 'e$^-$ s$^{-1}$'
    else:
        ylab = "10$^{0}$ {1}".format(nrm, 'e$^-$ s$^{-1}$')

    # data limits
    xmin = ptime1.min()
    xmax = ptime1.max()
    ymin = pout1[np.isfinite(pout1)].min()
    ymax = pout1[np.isfinite(pout1)].max()
    xr = xmax - xmin
    yr = ymax - ymin
    ptime1 = np.insert(ptime1, [0], [ptime1[0]])
    ptime1 = np.append(ptime1, [ptime1[-1]])
    pout1 = np.insert(pout1, [0], [0.0])
    pout1 = np.append(pout1, 0.0)
    if bindata:
        ptime2 = np.insert(ptime2, [0], ptime2[0] - 1.0 / nbins)
        ptime2 = np.insert(ptime2, [0], ptime2[0])
        ptime2 = np.append(
            ptime2, [ptime2[-1] + 1.0 / nbins, ptime2[-1] + 1.0 / nbins])
        pout2 = np.insert(pout2, [0], [pout2[-1]])
        pout2 = np.insert(pout2, [0], [0.0])
        pout2 = np.append(pout2, [pout2[2], 0.0])

    # plot new light curve
    if plottype != 'none':
        plt.figure()
        plt.clf()
        ax = plt.axes([0.06, 0.11, 0.93, 0.86])
        plt.gca().xaxis.set_major_formatter(
            plt.ScalarFormatter(useOffset=False))
        plt.gca().yaxis.set_major_formatter(
            plt.ScalarFormatter(useOffset=False))
        labels = ax.get_yticklabels()
        if bindata:
            plt.fill(ptime2, pout2, color='#ffff00', linewidth=0.0, alpha=0.2)
        else:
            if 'det' in plottype:
                plt.fill(ptime1,
                         pout1,
                         color='#ffff00',
                         linewidth=0.0,
                         alpha=0.2)
        plt.plot(ptime1,
                 pout1,
                 color='#0000ff',
                 linestyle='',
                 linewidth=2.0,
                 marker='.')
        if bindata:
            plt.plot(ptime2[1:-1],
                     pout2[1:-1],
                     color='r',
                     linestyle='-',
                     linewidth=2.0,
                     marker='')
        plt.xlabel(xlab, {'color': 'k'})
        plt.ylabel(ylab, {'color': 'k'})
        plt.xlim(-0.49999, 1.49999)
        if ymin >= 0.0:
            plt.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            plt.ylim(1.0e-10, ymax + yr * 0.01)
        plt.grid()
        plt.show()
    # close input file
    instr.close()
    # stop time
    kepmsg.clock('KEPFOLD ended at: ', logfile, verbose)
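
# A vectorized sketch (not part of the original tool) of the phase-wrapping
# step above, assuming NumPy arrays `phase` in [0, 1) and `flux` of equal
# length; boolean masks replace the per-point np.append loops.

import numpy as np

def wrap_phase(phase, flux):
    # Extend a phased series from [0, 1) to (-0.5, 1.5) by replicating
    # points shifted by one cycle on either side.
    left = phase > 0.5    # re-plotted one cycle early
    right = phase <= 0.5  # re-plotted one cycle late
    ptime = np.concatenate([phase[left] - 1.0, phase, phase[right] + 1.0])
    pout = np.concatenate([flux[left], flux, flux[right]])
    return ptime, pout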
Example #20
0
    def get_param(self,
                  res,
                  lib_all,
                  zrecom,
                  Czrec0,
                  Czrec1,
                  z_cz,
                  scl_cz0,
                  scl_cz1,
                  fitc,
                  tau0=[0.1, 0.2, 0.3],
                  tcalc=1.):
        print('##########################')
        print('### Writing parameters ###')
        print('##########################')

        ID0 = self.ID0
        PA0 = self.PA0

        age = self.AGE
        nage = np.arange(0, len(age), 1)
        Zall = self.ZALL

        fnc = Func(Zall, nage)  # Set up the number of Age/ZZ
        bfnc = Basic(Zall)

        DIR_TMP = self.DIR_TEMP

        # Filters
        import os.path
        home = os.path.expanduser('~')
        fil_path = self.DIR_FILT
        nmc = self.NMC
        ndim = self.NDIM

        mmax = 300
        if nmc < mmax:
            mmax = int(nmc / 2.)

        # RF color
        uv = np.zeros(int(mmax), dtype='float32')
        bv = np.zeros(int(mmax), dtype='float32')
        vj = np.zeros(int(mmax), dtype='float32')
        zj = np.zeros(int(mmax), dtype='float32')

        # Lick indices
        Dn4 = np.zeros(int(mmax), dtype='float32')
        Mgb = np.zeros(int(mmax), dtype='float32')
        Fe52 = np.zeros(int(mmax), dtype='float32')
        Fe53 = np.zeros(int(mmax), dtype='float32')
        Mg1 = np.zeros(int(mmax), dtype='float32')
        Mg2 = np.zeros(int(mmax), dtype='float32')
        G4300 = np.zeros(int(mmax), dtype='float32')
        NaD = np.zeros(int(mmax), dtype='float32')
        Hb = np.zeros(int(mmax), dtype='float32')
        #Muv  = np.zeros(int(mmax), dtype='float32')

        samples = res.chain[:, :, :].reshape((-1, ndim))  # Already burned.

        ##############################
        # Best parameters
        Amc = np.zeros((len(age), 3), dtype='float32')
        Ab = np.zeros(len(age), dtype='float32')
        Zmc = np.zeros((len(age), 3), dtype='float32')
        Zb = np.zeros(len(age), dtype='float32')
        NZbest = np.zeros(len(age), dtype='int')
        if self.f_dust:
            Mdustmc = np.zeros(3, dtype='float32')
            nTdustmc = np.zeros(3, dtype='float32')
            Tdustmc = np.zeros(3, dtype='float32')

        f0 = fits.open(DIR_TMP + 'ms_' + ID0 + '_PA' + PA0 + '.fits')
        sedpar = f0[1]
        ms = np.zeros(len(age), dtype='float32')
        msmc0 = 0
        for aa in range(len(age)):
            Ab[aa] = res.params['A' + str(aa)].value
            Amc[aa, :] = np.percentile(res.flatchain['A' + str(aa)],
                                       [16, 50, 84])
            try:
                Zb[aa] = res.params['Z' + str(aa)].value
                Zmc[aa, :] = np.percentile(res.flatchain['Z' + str(aa)],
                                           [16, 50, 84])
            except:
                Zb[aa] = res.params['Z0'].value
                Zmc[aa, :] = np.percentile(res.flatchain['Z0'], [16, 50, 84])

            NZbest[aa] = bfnc.Z2NZ(Zb[aa])
            ms[aa] = sedpar.data['ML_' + str(NZbest[aa])][aa]
            msmc0 += res.flatchain['A' + str(aa)] * ms[aa]

        msmc = np.percentile(msmc0, [16, 50, 84])
        Avb = res.params['Av'].value
        Avmc = np.percentile(res.flatchain['Av'], [16, 50, 84])
        AAvmc = [Avmc]
        try:
            zmc = np.percentile(res.flatchain['zmc'], [16, 50, 84])
        except:
            zmc = z_cz

        AA_tmp = np.zeros(len(age), dtype='float32')
        ZZ_tmp = np.zeros(len(age), dtype='float32')
        NZbest = np.zeros(len(age), dtype='int')
        DIR_TMP = self.DIR_TEMP

        #
        # Get MCMC model templates, plus some indices.
        #
        #print(res.flatchain['Av'][0])
        from lmfit import Parameters
        fit_params = Parameters()
        for mm in range(0, mmax, 1):
            rn = np.random.randint(len(samples))
            '''
            par_tmp   = samples[np.random.randint(len(samples))]
            AA_tmp[:] = par_tmp[:len(age)]
            Av_tmp    = par_tmp[len(age)]
            if self.ZEVOL == 1:
                ZZ_tmp[:] = par_tmp[len(age)+1:len(age)+1+len(age)]
            else:
                ZZ_tmp[:] = par_tmp[len(age)+1:len(age)+1+1]

            '''
            for aa in range(len(age)):
                fit_params.add('A' + str(aa),
                               value=res.flatchain['A%d' % (aa)][rn],
                               min=0,
                               max=10000)
            fit_params.add('Av', value=res.flatchain['Av'][rn], min=0, max=10)
            for aa in range(len(age)):
                try:
                    fit_params.add('Z' + str(aa),
                                   value=res.flatchain['Z%d' % (aa)][rn],
                                   min=-10,
                                   max=10)
                except:
                    pass
            model2, xm_tmp = fnc.tmp04(ID0,
                                       PA0,
                                       fit_params,
                                       zrecom,
                                       lib_all,
                                       tau0=tau0)
            '''
            # not necessary here.
            if self.f_dust:
                model2_dust, xm_tmp_dust = fnc.tmp04_dust(ID0, PA0, par_tmp, zrecom, lib_all, tau0=tau0)
                model2 = np.append(model2,model2_dust)
                xm_tmp = np.append(xm_tmp,xm_tmp_dust)
            '''

            lmrest = xm_tmp / (1. + zrecom)
            band0 = ['u', 'b', 'v', 'j', 'sz']
            lmconv, fconv = filconv(band0, lmrest, model2,
                                    fil_path)  # model2 in fnu

            Dn4[mm] = calc_Dn4(xm_tmp, model2,
                               zrecom)  # Dust attenuation is not included?
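            # colors as magnitude differences: m1 - m2 = -2.5 log10(f1 / f2)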
            uv[mm] = -2.5 * np.log10(fconv[0] / fconv[2])
            bv[mm] = -2.5 * np.log10(fconv[1] / fconv[2])
            vj[mm] = -2.5 * np.log10(fconv[2] / fconv[3])
            zj[mm] = -2.5 * np.log10(fconv[4] / fconv[3])
            '''
            AA_tmp_sum = 0
            for ii in range(len(ZZ_tmp)):
                NZbest[ii]  = bfnc.Z2NZ(ZZ_tmp[ii])
                AA_tmp_sum += AA_tmp[ii]

                f0 = fits.open(DIR_TMP + 'index_'+str(NZbest[ii])+'.fits')

                Mgb[mm]  += f0[1].data['Mgb_'+str(NZbest[ii])][ii]    * AA_tmp[ii]
                Fe52[mm] += f0[1].data['Fe5270_'+str(NZbest[ii])][ii] * AA_tmp[ii]
                Fe53[mm] += f0[1].data['Fe5335_'+str(NZbest[ii])][ii] * AA_tmp[ii]
                G4300[mm]+= f0[1].data['G4300_'+str(NZbest[ii])][ii]  * AA_tmp[ii]
                NaD[mm]  += f0[1].data['NaD_'+str(NZbest[ii])][ii]    * AA_tmp[ii]
                Hb[mm]   += f0[1].data['Hb_'+str(NZbest[ii])][ii]     * AA_tmp[ii]
                Mg1[mm]  += f0[1].data['Mg1_'+str(NZbest[ii])][ii]    * AA_tmp[ii]
                Mg2[mm]  += f0[1].data['Mg2_'+str(NZbest[ii])][ii]    * AA_tmp[ii]

            Mgb[mm]   /= AA_tmp_sum
            Fe52[mm]  /= AA_tmp_sum
            Fe53[mm]  /= AA_tmp_sum
            G4300[mm] /= AA_tmp_sum
            NaD[mm]   /= AA_tmp_sum
            Hb[mm]    /= AA_tmp_sum
            Mg1[mm]   /= AA_tmp_sum
            Mg2[mm]   /= AA_tmp_sum
            '''

        conper = (Dn4 > -99)  #(Dn4>0)
        Dnmc = np.percentile(Dn4[conper], [16, 50, 84])
        uvmc = np.percentile(uv[conper], [16, 50, 84])
        bvmc = np.percentile(bv[conper], [16, 50, 84])
        vjmc = np.percentile(vj[conper], [16, 50, 84])
        zjmc = np.percentile(zj[conper], [16, 50, 84])

        Mgbmc = np.percentile(Mgb[conper], [16, 50, 84])
        Fe52mc = np.percentile(Fe52[conper], [16, 50, 84])
        Fe53mc = np.percentile(Fe53[conper], [16, 50, 84])
        G4300mc = np.percentile(G4300[conper], [16, 50, 84])
        NaDmc = np.percentile(NaD[conper], [16, 50, 84])
        Hbmc = np.percentile(Hb[conper], [16, 50, 84])
        Mg1mc = np.percentile(Mg1[conper], [16, 50, 84])
        Mg2mc = np.percentile(Mg2[conper], [16, 50, 84])

        ############
        # Get SN.
        ############
        file = DIR_TMP + 'spec_obs_' + ID0 + '_PA' + PA0 + '.cat'
        fds = np.loadtxt(file, comments='#')
        nrs = fds[:, 0]
        lams = fds[:, 1]
        fsp = fds[:, 2]
        esp = fds[:, 3]

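        # median S/N of the observed spectrum over rest-frame 3600-4200 A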
        consp = ((nrs < 10000) & (lams / (1. + zrecom) > 3600)
                 & (lams / (1. + zrecom) < 4200))
        NSN = len((fsp / esp)[consp])
        if NSN > 0:
            SN = np.median((fsp / esp)[consp])
        else:
            SN = 0

        ######################
        # Write in Fits table.
        ######################
        # Header
        prihdr = fits.Header()
        prihdr['ID'] = ID0
        prihdr['PA'] = PA0
        prihdr['Cz0'] = Czrec0
        prihdr['Cz1'] = Czrec1
        prihdr['z'] = zrecom
        prihdr['SN'] = SN
        prihdr['nSN'] = NSN
        prihdr['NDIM'] = ndim
        prihdr['tcalc'] = tcalc
        prihdr['chi2'] = fitc[0]
        prihdr['chi2nu'] = fitc[1]
        prihdu = fits.PrimaryHDU(header=prihdr)

        # Data
        col01 = []
        for aa in range(len(age)):
            col50 = fits.Column(name='A' + str(aa),
                                format='E',
                                unit='',
                                array=Amc[aa][:])
            col01.append(col50)

        for aa in range(len(AAvmc)):
            col50 = fits.Column(name='Av' + str(aa),
                                format='E',
                                unit='mag',
                                array=AAvmc[aa][:])
            col01.append(col50)

        for aa in range(len(Zmc)):
            col50 = fits.Column(name='Z' + str(aa),
                                format='E',
                                unit='logZsun',
                                array=Zmc[aa][:])
            col01.append(col50)

        if self.f_dust:
            Mdustmc[:] = np.percentile(res.flatchain['MDUST'], [16, 50, 84])
            nTdustmc[:] = np.percentile(res.flatchain['TDUST'], [16, 50, 84])
            Tdustmc[:] = self.DT0 + self.dDT * nTdustmc[:]
            col50 = fits.Column(name='MDUST',
                                format='E',
                                unit='Msun',
                                array=Mdustmc[:])
            col01.append(col50)
            col50 = fits.Column(name='nTDUST',
                                format='E',
                                unit='K',
                                array=nTdustmc[:])
            col01.append(col50)
            col50 = fits.Column(name='TDUST',
                                format='E',
                                unit='K',
                                array=Tdustmc[:])
            col01.append(col50)

        # zmc
        #if int(inputs['ZMC']) == 1:
        col50 = fits.Column(name='zmc', format='E', unit='', array=zmc[:])
        col01.append(col50)

        # Dn4000
        col50 = fits.Column(name='Dn4', format='E', unit='', array=Dnmc[:])
        col01.append(col50)

        # Mass
        col50 = fits.Column(name='ms', format='E', unit='Msun', array=msmc[:])
        col01.append(col50)

        # U-V
        col50 = fits.Column(name='uv', format='E', unit='mag', array=uvmc[:])
        col01.append(col50)

        # V-J
        col50 = fits.Column(name='vj', format='E', unit='mag', array=vjmc[:])
        col01.append(col50)

        # B-V
        col50 = fits.Column(name='bv', format='E', unit='mag', array=bvmc[:])
        col01.append(col50)

        # z-J
        col50 = fits.Column(name='zj', format='E', unit='mag', array=zjmc[:])
        col01.append(col50)

        # z_cz
        col50 = fits.Column(name='z_cz', format='E', unit='', array=z_cz[:])
        col01.append(col50)

        # Chi
        col50 = fits.Column(name='chi', format='E', unit='', array=fitc[:])
        col01.append(col50)

        # C0 scale
        col50 = fits.Column(name='Cscale0',
                            format='E',
                            unit='',
                            array=scl_cz0[:])
        col01.append(col50)

        # C1 scale
        col50 = fits.Column(name='Cscale1',
                            format='E',
                            unit='',
                            array=scl_cz1[:])
        col01.append(col50)

        # Mgb
        col50 = fits.Column(name='Mgb', format='E', unit='', array=Mgbmc[:])
        col01.append(col50)

        # Fe5270
        col50 = fits.Column(name='Fe5270',
                            format='E',
                            unit='',
                            array=Fe52mc[:])
        col01.append(col50)

        # Fe5335
        col50 = fits.Column(name='Fe5335',
                            format='E',
                            unit='',
                            array=Fe53mc[:])
        col01.append(col50)

        # G4300
        col50 = fits.Column(name='G4300',
                            format='E',
                            unit='',
                            array=G4300mc[:])
        col01.append(col50)

        # NaD
        col50 = fits.Column(name='NaD', format='E', unit='', array=NaDmc[:])
        col01.append(col50)

        # Hb
        col50 = fits.Column(name='Hb', format='E', unit='', array=Hbmc[:])
        col01.append(col50)

        # Mg1
        col50 = fits.Column(name='Mg1', format='E', unit='', array=Mg1mc[:])
        col01.append(col50)

        # Mg2
        col50 = fits.Column(name='Mg2', format='E', unit='', array=Mg2mc[:])
        col01.append(col50)

        # Summarize;
        colms = fits.ColDefs(col01)
        dathdu = fits.BinTableHDU.from_columns(colms)
        hdu = fits.HDUList([prihdu, dathdu])
        hdu.writeto('summary_' + ID0 + '_PA' + PA0 + '.fits', overwrite=True)

        ##########
        # LINES
        ##########
        MB = Mainbody(self.inputs)
        LW, fLW = MB.get_lines(self.LW0)

        fw = open('table_' + ID0 + '_PA' + PA0 + '_lines.txt', 'w')
        fw.write(
            '# ID PA WL Fcont50 Fcont16 Fcont84 Fline50 Fline16 Fline84 EW50 EW16 EW84\n'
        )
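        # NOTE: Fcont, Fline and EW (continuum flux, line flux and equivalent
        # width per line) are assumed to be computed elsewhere in this class;
        # they are not defined in this excerpt.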
        for ii in range(len(LW)):
            if fLW[ii] == 1:
                fw.write(
                    '%s %s %d %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n'
                    % (ID0, PA0, LW[ii],
                       np.median(Fcont[ii, :]),
                       np.percentile(Fcont[ii, :], 16),
                       np.percentile(Fcont[ii, :], 84),
                       np.median(Fline[ii, :]),
                       np.percentile(Fline[ii, :], 16),
                       np.percentile(Fline[ii, :], 84),
                       np.median(EW[ii, :]),
                       np.percentile(EW[ii, :], 16),
                       np.percentile(EW[ii, :], 84)))
            else:
                fw.write('%s %s %d 0 0 0 0 0 0 0 0 0\n' % (ID0, PA0, LW[ii]))
        fw.close()
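
A short self-contained sketch of the table-writing pattern used throughout
this example (collect fits.Column objects, wrap them in fits.ColDefs, and
emit a BinTableHDU); the column name and chain below are stand-ins:

import numpy as np
from astropy.io import fits

chain = np.random.normal(1.0, 0.1, size=1000)  # stand-in for a flatchain
percentiles = np.percentile(chain, [16, 50, 84])

cols = fits.ColDefs([fits.Column(name='A0', format='E', array=percentiles)])
tbhdu = fits.BinTableHDU.from_columns(cols)
fits.HDUList([fits.PrimaryHDU(), tbhdu]).writeto('summary_example.fits',
                                                 overwrite=True)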
Example #21
0
def subtract_self(frand, index, FTSobsid, redshift, width_of_sm_kernel,
                  apod_width, Name, window):
    freq_Cp = 1900.55 / (1 + redshift)
    freq_Oiii88 = frand  # random frequency standing in for the injected signal
    freq_Oiii51 = 5785.88 / (1 + redshift)
    freq_NII122 = 2459.38 / (1 + redshift)
    freq_OI63 = 4744.75 / (1 + redshift)
    freq_H2O1 = 1661. / (1 + redshift)
    freq_H2O2 = 1670. / (1 + redshift)
    freq_OH = 2512.30 / (1 + redshift)
    freq_HF10 = 1232.48 / (1 + redshift)
    freq_OI145 = 2060.07 / (1 + redshift)

    freq_list = [
        [freq_Cp, "C$^+$ 158"],
        [freq_Oiii88, 'O$_{III} 88$'],
        [freq_Oiii51, 'O$_{III} 51$'],
        [freq_NII122, 'N$_{II}$122'],
        [freq_OI63, 'O$_{I}$63'],
        [freq_H2O1, 'H$_2$O-1'],
        [freq_H2O2, 'H$_2$O-2'],
        [freq_OH, 'OH'],
        [freq_OI145, 'O$_I$145'],
    ]

    ## read in the target spectra
    spec = fits.open(str(FTSobsid) + '_HR_spectrum_point_apod.fits')
    #   spec     = fits.open(str(FTSobsid)+'_HR_spectrum_point.fits')
    spec_ori = fits.open(str(FTSobsid) + '_HR_spectrum_point.fits')

    # define detectors of SLW and SSW
    centreDetectors = ["SLWC3", "SSWD4"]

    # define the output spectra (using the central pixel)
    cent_spec_SLW = np.full((3, 1905), 1.0)
    cent_spec_SSW = np.full((3, 2082), 1.0)

    # assignment of the output spectra
    for k in range(2, 24):
        if (spec[k].header['EXTNAME'] == centreDetectors[0]):
            cent_spec_SLW[0, ] = spec[k].data.wave
            cent_spec_SLW[1, ] = spec[k].data.flux
            cent_spec_SLW[2, ] = spec_ori[k].data.error / np.sqrt(
                apod_width / 1.2)

        if (spec[k].header['EXTNAME'] == centreDetectors[1]):
            cent_spec_SSW[0, ] = spec[k].data.wave
            cent_spec_SSW[1, ] = spec[k].data.flux
            cent_spec_SSW[2, ] = spec_ori[k].data.error / np.sqrt(
                apod_width / 1.2)

    ## -------------define different Gaussian Kernels
    g = Gaussian1DKernel(stddev=width_of_sm_kernel)

    ## --------------- make random frequency of sinc functions and add in

    p0 = 0.3  # peak amplitude [Jy]
    p1 = frand  # central freq [GHz] (e.g. 800)
    p2 = 0.37733 * 2  # Delta sigma / pi; FWHM = 1.20671 * Delta sigma
    x = cent_spec_SLW[0, ]
    sinconly = p0 * np.sinc((x - p1) / p2)
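    # note: np.sinc(t) = sin(pi*t)/(pi*t), so the injected profile has its
    # first nulls at x = p1 +/- p2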
    spec_n_sinc = sinconly + cent_spec_SLW[1, ]
    cent_spec_SLW[1, ] = spec_n_sinc

    #------------------------------------------------------------
    # directly derive the local baseline using the spectra themselves, without dark sky subtraction
    ## -  mask/flag the channels with signals
    #------------------------------------------------------------
    cent_SLW_nan = copy.copy(cent_spec_SLW[1])
    for freq_obs, name in freq_list:
        cent_SLW_nan[np.where(
            np.abs(cent_spec_SLW[0] - freq_obs) < window / 2)] = np.nan

    cent_SSW_nan = copy.copy(cent_spec_SSW[1])
    for freq_obs, name in freq_list:
        cent_SSW_nan[np.where(
            np.abs(cent_spec_SSW[0] - freq_obs) < window / 2)] = np.nan

    #------------------------------------------------------------

    ## ----- convolve the central spectra (after blanking) to lower
    ## ----- resolutions to derive the 'local' baseline shape.
    cent_SLW_nan_sm = convolve(cent_SLW_nan, g, boundary='extend')
    cent_SLW_subtract_self_base = cent_spec_SLW[1] - cent_SLW_nan_sm
    cent_SSW_nan_sm = convolve(cent_SSW_nan, g, boundary='extend')
    cent_SSW_subtract_self_base = cent_spec_SSW[1] - cent_SSW_nan_sm
    cent_SLW_err = cent_spec_SLW[2, ]
    cent_SSW_err = cent_spec_SSW[2, ]

    edge_cut = 0  #120
    edge_SLW_low_cut = 0  #30
    edge_SLW_high_cut = 0  #180
    edge_SSW_low_cut = 0  #30
    edge_SSW_high_cut = 0  #30
    SLW_size = len(cent_spec_SLW[0])
    SSW_size = len(cent_spec_SSW[0])

    cent_SLW_subtract_self_base = copy.copy(
        cent_SLW_subtract_self_base[edge_SLW_low_cut:SLW_size -
                                    edge_SLW_high_cut])
    cent_SSW_subtract_self_base = copy.copy(
        cent_SSW_subtract_self_base[edge_SSW_low_cut:SSW_size -
                                    edge_SSW_high_cut])
    cent_SLW_wave = copy.copy(cent_spec_SLW[0][edge_SLW_low_cut:SLW_size -
                                               edge_SLW_high_cut])
    cent_SSW_wave = copy.copy(cent_spec_SSW[0][edge_SSW_low_cut:SSW_size -
                                               edge_SSW_high_cut])

    #-------------- plot the added sinc spectra ----------------
    plt.clf()
    fig, ax_f = plt.subplots()
    ax_f.plot(cent_spec_SLW[0], sinconly, label='SLW', linewidth=0.1)
    #   ax_f.plot(cent_spec_SSW[0], spec_n_sinc ,    label='SSW'   , linewidth=0.1 )
    ymin, ymax = ax_f.get_ylim()

    ax_f.set_xlim(400, 1600)
    ax_f.annotate(Name, (1200, ymax * 0.9), size=12)
    ax_f.annotate('obsid ' + str(FTSobsid), (1200, ymax * 0.8), size=12)
    ax_f.text(250, 0.2, "Flux density (Jy)", rotation=90, size='12')
    ax_f.text(800, ymin - 0.2, "Frequency (GHz)", size='12')

    textlines(ax_f, redshift, 0.3, freq_list)
    plt.legend(loc=2, borderaxespad=0.)
    plt.savefig('plots/overlay/' + str(FTSobsid) + str(index) +
                '_sinc_only.pdf')

    #-------------- plot the baseline and original spectra ----------------
    plt.clf()
    fig, ax_f = plt.subplots()
    ax_f.plot(cent_spec_SLW[0], cent_spec_SLW[1], label='SLW', linewidth=0.1)
    ax_f.plot(cent_spec_SSW[0], cent_spec_SSW[1], label='SSW', linewidth=0.1)
    ax_f.plot(cent_spec_SLW[0], cent_SLW_nan_sm, label='BL SLW', linewidth=0.1)
    ax_f.plot(cent_spec_SSW[0], cent_SSW_nan_sm, label='BL SSW', linewidth=0.1)
    print(cent_SLW_nan_sm)
    ymin, ymax = ax_f.get_ylim()

    ax_f.set_xlim(400, 1600)
    ax_f.annotate(Name, (1200, ymax * 0.9), size=12)
    ax_f.annotate('obsid ' + str(FTSobsid), (1200, ymax * 0.8), size=12)

    ax_f.text(250, 0.2, "Flux density (Jy)", rotation=90, size='12')
    ax_f.text(800, ymin - 0.2, "Frequency (GHz)", size='12')

    textlines(ax_f, redshift, 0.3, freq_list)
    plt.legend(loc=2, borderaxespad=0.)
    plt.savefig('plots/overlay/' + str(FTSobsid) + str(index) +
                '_subtracted.pdf')

    #-------------- plot the baseline subtracted spectra ----------------
    plt.clf()
    fig, ax_f = plt.subplots()
    ax_f.plot(cent_SLW_wave,
              cent_SLW_subtract_self_base,
              label='SLW',
              linewidth=0.1)
    #   ax_f.plot(cent_spec_SLW[0], cent_SLW_err,                    label='SLW err')
    #   ax_f.plot(cent_spec_SLW[0], cent_SLW_err*(-1))
    ax_f.plot(cent_SSW_wave,
              cent_SSW_subtract_self_base,
              label='SSW',
              linewidth=0.1)
    #   ax_f.plot(cent_spec_SSW[0], cent_SSW_subtract_self_base* 0,  label=str(Name))
    #   ax_f.plot(cent_spec_SSW[0], cent_SLW_err,                    label='SSW err')
    #   ax_f.plot(cent_spec_SSW[0], cent_SLW_err*(-1))

    ax_f.set_ylim(-0.5, 0.7)
    ax_f.set_xlim(400, 1600)
    ax_f.annotate(Name, (1400, 0.6), size=12)
    ax_f.text(250, 0.2, "Flux density (Jy)", rotation=90, size='12')
    ax_f.text(800, -0.6, "Frequency (GHz)", size='12')
    ax_f.set_xlim(freq_Oiii88 - 20, freq_Oiii88 + 20)

    textlines(ax_f, redshift, 0.3, freq_list)
    plt.legend(loc=2, borderaxespad=0.)
    plt.savefig('plots/overlay/all' + str(FTSobsid) + str(index) +
                '_subtracted.pdf')

    ax_f.set_ylim(-0.5, 0.7)
    plt.legend(loc=2, borderaxespad=0.)
    textlines(ax_f, redshift, 0.3, freq_list)
    ax_f.set_xlim(freq_Cp - 20, freq_Cp + 20)
    plt.savefig('plots/overlay/cplus' + str(FTSobsid) + str(index) +
                '_subtracted.pdf')
    ax_f.set_xlim(freq_Oiii88 - 20, freq_Oiii88 + 20)
    plt.savefig('plots/overlay/oiii88' + str(FTSobsid) + str(index) +
                '_subtracted.pdf')

    #-----------------------------------------------------------------

    #----------------- output fits files --------------------
    # SLW
    hdu = fits.PrimaryHDU()
    table_hdu = fits.BinTableHDU.from_columns(
        fits.ColDefs([
            fits.Column(name='wave',
                        format='D',
                        unit='GHz',
                        array=cent_spec_SLW[0]),
            fits.Column(name='flux',
                        format='D',
                        unit='Jy',
                        array=cent_SLW_subtract_self_base),
            fits.Column(name='error',
                        format='D',
                        unit='Jy',
                        array=cent_spec_SLW[2, ]),
        ]))
    hdulist = fits.HDUList([hdu, table_hdu])
    hdulist.writeto('baselined/' + str(FTSobsid) +
                    '_HR_spectrum_point_SLW_apod_baselined.fits',
                    overwrite=True)
    # SSW
    hdu = fits.PrimaryHDU()
    table_hdu = fits.BinTableHDU.from_columns(
        fits.ColDefs([
            fits.Column(name='wave',
                        format='D',
                        unit='GHz',
                        array=cent_spec_SSW[0]),
            fits.Column(name='flux',
                        format='D',
                        unit='Jy',
                        array=cent_SSW_subtract_self_base),
            fits.Column(name='error',
                        format='D',
                        unit='Jy',
                        array=cent_spec_SSW[2, ]),
        ]))
    hdulist = fits.HDUList([hdu, table_hdu])
    hdulist.writeto('baselined/' + str(FTSobsid) +
                    '_HR_spectrum_point_SSW_apod_baselined.fits',
                    overwrite=True)
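
A compact sketch of the baseline-estimation idea above: blank the channels
around known lines, smooth what remains, and subtract. The spectrum and all
parameter values below are synthetic stand-ins:

import numpy as np
from astropy.convolution import Gaussian1DKernel, convolve

freq = np.linspace(450.0, 950.0, 1905)  # GHz, an SLW-like grid
spec = 0.1 * np.sin(freq / 40.0) + 0.3 * np.sinc((freq - 700.0) / 0.75)

masked = spec.copy()
masked[np.abs(freq - 700.0) < 5.0] = np.nan  # blank the known line

# convolve() interpolates across NaNs by default, yielding a 'local' baseline
baseline = convolve(masked, Gaussian1DKernel(stddev=50), boundary='extend')
residual = spec - baseline  # the line survives; the broad ripple is removed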
Example #22
0
def main(argv=None):
    import argparse
    parser = argparse.ArgumentParser(
        description=
        "Use PINT to compute event phases and make plots of photon event files."
    )
    parser.add_argument(
        "eventfile",
        help=
        "Photon event FITS file name (e.g. from NICER, RXTE, XMM, Chandra).")
    parser.add_argument("parfile", help="par file to construct model from")
    parser.add_argument("--orbfile", help="Name of orbit file", default=None)
    parser.add_argument("--maxMJD",
                        help="Maximum MJD to include in analysis",
                        default=None)
    parser.add_argument("--plotfile",
                        help="Output figure file name (default=None)",
                        default=None)
    parser.add_argument("--addphase",
                        help="Write FITS file with added phase column",
                        default=False,
                        action='store_true')
    parser.add_argument(
        "--absphase",
        help="Write FITS file with integral portion of pulse phase (ABS_PHASE)",
        default=False,
        action='store_true')
    parser.add_argument(
        "--barytime",
        help=
        "Write FITS file with a column containing the barycentric time as double precision MJD.",
        default=False,
        action='store_true')
    parser.add_argument(
        "--outfile",
        help="Output FITS file name (default=same as eventfile)",
        default=None)
    parser.add_argument("--ephem",
                        help="Planetary ephemeris to use (default=DE421)",
                        default="DE421")
    parser.add_argument(
        '--tdbmethod',
        help="Method for computing TT to TDB (default=astropy)",
        default="default")
    parser.add_argument("--plot",
                        help="Show phaseogram plot.",
                        action='store_true',
                        default=False)
    parser.add_argument("--use_gps",
                        default=False,
                        action='store_true',
                        help="Apply GPS to UTC clock corrections")
    parser.add_argument("--use_bipm",
                        default=False,
                        action='store_true',
                        help="Use TT(BIPM) instead of TT(TAI)")
    #    parser.add_argument("--fix",help="Apply 1.0 second offset for NICER", action='store_true', default=False)
    args = parser.parse_args(argv)

    # If outfile is specified, that implies addphase
    if args.outfile is not None:
        args.addphase = True

    # If plotfile is specified, that implies plot
    if args.plotfile is not None:
        args.plot = True

    # Read event file header to figure out which instrument it is from
    hdr = pyfits.getheader(args.eventfile, ext=1)

    log.info('Event file TELESCOPE = {0}, INSTRUMENT = {1}'.format(
        hdr['TELESCOP'], hdr['INSTRUME']))
    if hdr['TELESCOP'] == 'NICER':
        # Instantiate NICERObs once so it gets added to the observatory registry
        if args.orbfile is not None:
            log.info('Setting up NICER observatory')
            NICERObs(name='NICER', FPorbname=args.orbfile, tt2tdb_mode='pint')
        # Read event file and return list of TOA objects
        try:
            tl = load_NICER_TOAs(args.eventfile)
        except KeyError:
            log.error(
                "Observatory not recognized.  This probably means you need to provide an orbit file or barycenter the event file."
            )
            sys.exit(1)
    elif hdr['TELESCOP'] == 'XTE':
        # Instantiate RXTEObs once so it gets added to the observatory registry
        if args.orbfile is not None:
            # Determine what observatory type is.
            log.info('Setting up RXTE observatory')
            RXTEObs(name='RXTE', FPorbname=args.orbfile, tt2tdb_mode='pint')
        # Read event file and return list of TOA objects
        tl = load_RXTE_TOAs(args.eventfile)
    elif hdr['TELESCOP'].startswith('XMM'):
        # Not loading orbit file here, since that is not yet supported.
        tl = load_XMM_TOAs(args.eventfile)
    elif hdr['TELESCOP'].startswith('NuSTAR'):
        # Not loading orbit file here, since that is not yet supported.
        tl = load_NuSTAR_TOAs(args.eventfile)
    else:
        log.error(
            "FITS file not recognized, TELESCOPE = {0}, INSTRUMENT = {1}".
            format(hdr['TELESCOP'], hdr['INSTRUME']))
        sys.exit(1)

    # Now convert to TOAs object and compute TDBs and posvels
    if len(tl) == 0:
        log.error("No TOAs, exiting!")
        sys.exit(0)

    # Read in model
    modelin = pint.models.get_model(args.parfile)
    use_planets = False
    if 'PLANET_SHAPIRO' in modelin.params:
        if modelin.PLANET_SHAPIRO.value:
            use_planets = True

    # Discard events outside of MJD range
    if args.maxMJD is not None:
        tlnew = []
        print("pre len : ", len(tl))
        maxT = Time(float(args.maxMJD), format='mjd')
        print("maxT : ", maxT)
        for tt in tl:
            if tt.mjd < maxT:
                tlnew.append(tt)
        tl = tlnew
        print("post len : ", len(tlnew))

    ts = toa.get_TOAs_list(tl,
                           ephem=args.ephem,
                           include_bipm=args.use_bipm,
                           include_gps=args.use_gps,
                           planets=use_planets,
                           tdb_method=args.tdbmethod)
    ts.filename = args.eventfile
    #    if args.fix:
    #        ts.adjust_TOAs(TimeDelta(np.ones(len(ts.table))*-1.0*u.s,scale='tt'))

    print(ts.get_summary())
    mjds = ts.get_mjds()
    print(mjds.min(), mjds.max())

    # Compute model phase for each TOA
    iphss, phss = modelin.phase(ts, abs_phase=True)
    # ensure all phases are positive
    negmask = phss < 0.0 * u.cycle
    phases = np.where(negmask, phss + 1.0 * u.cycle, phss)
    h = float(hm(phases))
    print("Htest : {0:.2f} ({1:.2f} sigma)".format(h, h2sig(h)))
    if args.plot:
        phaseogram_binned(mjds, phases, bins=100, plotfile=args.plotfile)

    if args.addphase:
        # Read input FITS file (again).
        # If overwriting, open in 'update' mode
        if args.outfile is None:
            hdulist = pyfits.open(args.eventfile, mode='update')
        else:
            hdulist = pyfits.open(args.eventfile)
        if len(hdulist[1].data) != len(phases):
            raise RuntimeError(
                'Mismatch between length of FITS table ({0}) and length of phase array ({1})!'
                .format(len(hdulist[1].data), len(phases)))
        data_to_add = {'PULSE_PHASE': [phases, 'D']}
        if args.absphase:
            data_to_add['ABS_PHASE'] = [iphss - negmask * u.cycle, 'K']
        if args.barytime:
            bats = modelin.get_barycentric_toas(ts)
            data_to_add['BARY_TIME'] = [bats, 'D']
        datacol = []
        for key in data_to_add.keys():
            if key in hdulist[1].columns.names:
                log.info('Found existing %s column, overwriting...' % key)
                # Overwrite values in existing Column
                hdulist[1].data[key] = data_to_add[key][0]
            else:
                # Construct and append new column, preserving HDU header and name
                log.info('Adding new %s column.' % key)
                datacol.append(
                    pyfits.ColDefs([
                        pyfits.Column(name=key,
                                      format=data_to_add[key][1],
                                      array=data_to_add[key][0])
                    ]))
        if len(datacol) > 0:
            cols = hdulist[1].columns
            for c in datacol:
                cols = cols + c
            bt = pyfits.BinTableHDU.from_columns(cols,
                                                 header=hdulist[1].header,
                                                 name=hdulist[1].name)
            hdulist[1] = bt
        if args.outfile is None:
            # Overwrite the existing file
            log.info('Overwriting existing FITS file ' + args.eventfile)
            hdulist.flush(verbose=True, output_verify='warn')
        else:
            # Write to new output file
            log.info('Writing output FITS file ' + args.outfile)
            hdulist.writeto(args.outfile,
                            overwrite=True,
                            checksum=True,
                            output_verify='warn')
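
The column-append branch above is the general recipe for adding a column to
an existing event table; a minimal self-contained sketch with a hypothetical
table and data:

import numpy as np
from astropy.io import fits

# A small event table standing in for hdulist[1].
events = fits.BinTableHDU.from_columns(fits.ColDefs(
    [fits.Column(name='TIME', format='D', array=np.arange(5.0))]),
    name='EVENTS')

# Append a PULSE_PHASE column, preserving the HDU header and name.
newcols = events.columns + fits.ColDefs(
    [fits.Column(name='PULSE_PHASE', format='D',
                 array=np.linspace(0.0, 0.8, 5))])
events = fits.BinTableHDU.from_columns(newcols, header=events.header,
                                       name=events.name)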
Example #23
0
    def _write_singledish_hdu(self):
        """
        Define the SINGLE DISH table.
        """

        scanList = []
        dateList = []
        timeList = []
        intTimeList = []
        beamList = []
        mList = []
        rawList = []
        scanCount = 1
        for i, dataSet in enumerate(self.data):
            if dataSet.pol == self.stokes[0]:
                tempMList = {}
                for stokes in self.stokes:
                    tempMList[stokes] = {}

            beams = list(dataSet.dataDict.keys())
            beams.sort()
            for b in beams:
                specData = dataSet.dataDict[b]

                # Load the data into a matrix
                tempMList[dataSet.pol][b] = specData.ravel()

                if dataSet.pol == self.stokes[0]:
                    # Observation date and time
                    utc = astro.taimjd_to_utcjd(dataSet.obsTime)
                    date = astro.get_date(utc)
                    date.hours = 0
                    date.minutes = 0
                    date.seconds = 0
                    utc0 = date.to_jd()

                    scanList.append(scanCount)
                    dateList.append('%4i-%02i-%02i' %
                                    (date.years, date.months, date.days))
                    timeList.append((utc - utc0) * 24 * 3600)
                    intTimeList.append(dataSet.intTime)
                    beamList.append(b.id)
                    rawList.append(b)

            if dataSet.pol == self.stokes[-1]:
                for b in rawList:
                    matrix = numpy.zeros((self.nStokes, self.nChan),
                                         dtype=numpy.float32)
                    for p in range(self.nStokes):
                        try:
                            matrix[p, :] = tempMList[self.stokes[p]][b]
                        except KeyError:
                            warnings.warn(
                                colorfy(
                                    "{{%%yellow Key mis-match %s %s}}" %
                                    (str(b),
                                     str(tempMList[self.stokes[p]].keys()))),
                                RuntimeWarning)

                    mList.append(matrix.ravel())
                scanCount += 1
                rawList = []

        # Scan number
        c1 = astrofits.Column(name='SCAN',
                              format='1I',
                              array=numpy.array(scanList))
        ## Cycle
        #c2 = astrofits.Column(name='CYCLE', format='1J',
        #array=numpy.array([1,]*len(scanList)))
        # DATE-OBS
        c3 = astrofits.Column(name='DATE-OBS',
                              format='10A',
                              array=numpy.array(dateList))
        # Time elapsed since 0h
        c4 = astrofits.Column(name='TIME',
                              format='1D',
                              unit='s',
                              array=numpy.array(timeList))
        # Integration time (seconds)
        c5 = astrofits.Column(name='EXPOSURE',
                              format='1E',
                              unit='s',
                              array=numpy.array(intTimeList,
                                                dtype=numpy.float32))
        # Object name
        c6 = astrofits.Column(name='OBJECT',
                              format='16A',
                              array=numpy.array([
                                  'LWA_OBS',
                              ] * len(scanList)))
        # Object position (deg and deg)
        c7 = astrofits.Column(name='OBJ-RA',
                              format='1D',
                              unit='deg',
                              array=numpy.array([
                                  0.0,
                              ] * len(scanList)))
        c8 = astrofits.Column(name='OBJ-DEC',
                              format='1D',
                              unit='deg',
                              array=numpy.array([
                                  0.0,
                              ] * len(scanList)))
        # Rest frequency (Hz)
        c9 = astrofits.Column(name='RESTFRQ',
                              format='1D',
                              unit='Hz',
                              array=numpy.array([
                                  0.0,
                              ] * len(scanList)))
        # Observation mode
        c10 = astrofits.Column(name='OBSMODE',
                               format='16A',
                               array=numpy.array([
                                   self.mode,
                               ] * len(scanList)))
        # Beam (tuning)
        c11 = astrofits.Column(name='BEAM',
                               format='1I',
                               array=numpy.array(beamList))
        # IF
        c12 = astrofits.Column(name='IF',
                               format='1I',
                               array=numpy.array([
                                   self.freq[0].id,
                               ] * len(scanList)))
        # Frequency resolution (Hz)
        c13 = astrofits.Column(name='FREQRES',
                               format='1D',
                               unit='Hz',
                               array=numpy.array([
                                   self.freq[0].chWidth,
                               ] * len(scanList)))
        # Bandwidth of the system (Hz)
        c14 = astrofits.Column(name='BANDWID',
                               format='1D',
                               unit='Hz',
                               array=numpy.array([
                                   self.freq[0].totalBW,
                               ] * len(scanList)))
        # Frequency axis - 1
        c15 = astrofits.Column(name='CRPIX1',
                               format='1E',
                               array=numpy.array([
                                   self.refPix,
                               ] * len(scanList)))
        c16 = astrofits.Column(name='CRVAL1',
                               format='1D',
                               unit='Hz',
                               array=numpy.array([
                                   self.refVal,
                               ] * len(scanList)))
        c17 = astrofits.Column(name='CDELT1',
                               format='1D',
                               unit='Hz',
                               array=numpy.array([
                                   self.freq[0].chWidth,
                               ] * len(scanList)))
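        # R.A. axis - 3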
        c18 = astrofits.Column(name='CRVAL3',
                               format='1D',
                               unit='deg',
                               array=numpy.array([
                                   0.0,
                               ] * len(scanList)))
        # Dec. axis - 4
        c19 = astrofits.Column(name='CRVAL4',
                               format='1D',
                               unit='deg',
                               array=numpy.array([
                                   0.0,
                               ] * len(scanList)))
        ## Scan rate
        #c20 = astrofits.Column(name='SCANRATE', format='2E', unit='deg/s',
        #array=numpy.array([[0,0],]*len(scanList)))

        #
        # Calibration information (currently not implemented)
        #
        ## System temperature  *** UNKNOWN ***
        #c21 =  astrofits.Column(name='TSYS', format='2E', unit='K',
        #array=numpy.array([[self.tSys,self.tSys],]*len(scanList)))
        ## CALFCTR *** UNKNOWN ***
        #c22 =  astrofits.Column(name='CALFCTR', format='2E', unit='K',
        #array=numpy.array([[1,1],]*len(scanList)))

        # Data
        c23 = astrofits.Column(name='DATA',
                               format='%iE' % (self.nStokes * self.nChan),
                               unit='UNCALIB',
                               array=numpy.array(mList))

        #
        # Data masking table (currently not implemented)
        #
        # Flag table
        #c24 = astrofits.Column(name='FLAGGED', format='%iB' % (self.nStokes*self.nChan),
        #array=numpy.array([[0,]*self.nStokes*self.nChan for s in scanList]))

        #
        # Calibration information (currently not implemented)
        #
        ## TCAL *** UNKNOWN ***
        #c25 = astrofits.Column(name='TCAL', format='2E', unit='Jy',
        #array=numpy.array([[1,1] for s in scanList]))
        ## TCALTIME *** UNKNOWN ***
        #c26 = astrofits.Column(name='TCALTIME', format='16A',
        #array=numpy.array(['UNKNOWN',]*len(scanList)))

        #
        # Pointing information (currently not implemented)
        #
        ## Azimuth *** UNKNOWN ***
        #c27 = astrofits.Column(name='AZIMUTH', format='1E', unit='deg',
        #array=numpy.array([0,]*len(scanList)))
        ## Elevation *** UNKNOWN ***
        #c28 = astrofits.Column(name='ELEVATIO', format='1E', unit='deg',
        #array=numpy.array([0,]*len(scanList)))
        ## Parallactic angle *** UNKNOWN ***
        #c29 = astrofits.Column(name='PARANGLE', format='1E', unit='deg',
        #array=numpy.array([0,]*len(scanList)))

        #
        # Focusing information (currently not implemented and probably never will be)
        #
        ## FOCUSAXI *** NOT NEEDED ***
        #c30 = astrofits.Column(name='FOCUSAXI', format='1E', unit='m',
        #array=numpy.array([0,]*len(scanList)))
        ## FOCUSTAN *** NOT NEEDED ***
        #c31 = astrofits.Column(name='FOCUSTAN', format='1E', unit='m',
        #array=numpy.array([0,]*len(scanList)))
        ## FOCUSROT *** NOT NEEDED ***
        #c32 = astrofits.Column(name='FOCUSROT', format='1E', unit='deg',
        #array=numpy.array([0,]*len(scanList)))

        #
        # Weather information (currently not implemented)
        #
        ## Ambient temperature *** UNKNOWN ***
        #c33 = astrofits.Column(name='TAMBIENT', format='1E', unit='C',
        #array=numpy.array([0,]*len(scanList)))
        ## Air pressure *** UNKNOWN ***
        #c34 = astrofits.Column(name='PRESSURE', format='1E', unit='Pa',
        #array=numpy.array([0,]*len(scanList)))
        ## Humidity *** UNKNOWN ***
        #c35 = astrofits.Column(name='HUMIDITY', format='1E', unit='%',
        #array=numpy.array([0,]*len(scanList)))
        ## Wind speed *** UNKNOWN ***
        #c36 = astrofits.Column(name='WINDSPEE', format='1E', unit='m/s',
        #array=numpy.array([0,]*len(scanList)))
        ## Wind direction *** UNKNOWN ***
        #c37 = astrofits.Column(name='WINDDIRE', format='1E', unit='deg',
        #array=numpy.array([0,]*len(scanList)))

        # Gather together all of the needed columns and figure out which ones
        # store the data and flag tables.  This information is needed later to
        # set the appropriate TDIM keywords.
        cs = [
            c1, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16,
            c17, c18, c19, c23
        ]
        dataIndex = 0
        #flagIndex = 0
        for i, c in enumerate(cs):
            try:
                if c.name == 'DATA':
                    dataIndex = i + 1
                #if c.name == 'FLAGGED':
                #flagIndex = n
            except NameError:
                pass
        colDefs = astrofits.ColDefs(cs)

        # Create the SINGLE DISH table and update its header
        sd = astrofits.BinTableHDU.from_columns(colDefs)

        ## Single dish keywords - order seems to matter
        sd.header['EXTNAME'] = ('SINGLE DISH', 'SDFITS table name')
        sd.header['NMATRIX'] = 1
        sd.header['OBSERVER'] = (self.observer, 'Observer name(s)')
        sd.header['PROJID'] = (self.project, 'Project name')
        sd.header['TELESCOP'] = (self.site.name, 'Telescope name')
        x, y, z = self.site.geocentric_location
        sd.header['OBSGEO-X'] = (x, '[m] Antenna ECEF X-coordinate')
        sd.header['OBSGEO-Y'] = (y, '[m] Antenna ECEF Y-coordinate')
        sd.header['OBSGEO-Z'] = (z, '[m] Antenna ECEF Z-coordinate')

        sd.header['SPECSYS'] = ('LSRK',
                                'Doppler reference frame (transformed)')
        sd.header['SSYSOBS'] = ('TOPOCENT',
                                'Doppler reference frame of observation')
        sd.header['EQUINOX'] = (2000.0, 'Equinox of equatorial coordinates')
        sd.header['RADESYS'] = ('FK5', 'Equatorial coordinate system frame')

        ## Data and flag table dimensionality
        sd.header['TDIM%i' % dataIndex] = ('(%i,%i,1,1)' %
                                           (self.nChan, self.nStokes))
        #sd.header.set('TDIM%i' % flagIndex, '(%i,%i,1,1)' % (self.nChan, self.nStokes), after='TFORM%i' % flagIndex)

        ## Data and flag table axis descriptions
        ### Frequency
        sd.header['CTYPE1'] = ('FREQ', 'axis 1 is FREQ (frequency)')
        sd.header['CDELT1'] = self.freq[0].chWidth
        sd.header['CRPIX1'] = self.refPix
        sd.header['CRVAL1'] = self.refVal
        ### Stokes
        sd.header['CTYPE2'] = ('STOKES',
                               'axis 2 is STOKES axis (polarization)')
        if self.stokes[0] < 0:
            sd.header['CDELT2'] = -1.0
        else:
            sd.header['CDELT2'] = 1.0
        sd.header['CRPIX2'] = 1.0
        sd.header['CRVAL2'] = float(self.stokes[0])
        ### RA
        sd.header['CTYPE3'] = ('RA', 'axis 3 is RA axis (pointing)')
        sd.header['CRPIX3'] = 1.0
        sd.header['CDELT3'] = -1.0
        ### Dec
        sd.header['CTYPE4'] = ('DEC', 'axis 4 is Dec. axis (pointing)')
        sd.header['CRPIX4'] = 1.0
        sd.header['CDELT4'] = 1.0

        self.FITS.append(sd)
        self.FITS.flush()
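
The TDIM keyword set above is what lets SDFITS readers reshape each flattened
DATA row back into its (channel, polarization) matrix; a tiny sketch with
hypothetical dimensions:

import numpy as np
from astropy.io import fits

nstokes, nchan, nrows = 2, 4, 3
data = np.arange(nrows * nstokes * nchan, dtype=np.float32).reshape(nrows, -1)

col = fits.Column(name='DATA', format='%iE' % (nstokes * nchan), array=data)
sd = fits.BinTableHDU.from_columns(fits.ColDefs([col]))
# TDIM is FORTRAN-order: the fastest-varying axis (frequency) comes first.
sd.header['TDIM1'] = '(%i,%i,1,1)' % (nchan, nstokes)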
Example #24
0
def keptrial(infile,
             outfile,
             datacol='SAP_FLUX',
             errcol='SAP_FLUX_ERR',
             fmin=0.1,
             fmax=50,
             nfreq=100,
             method='ft',
             ntrials=1000,
             plot=False,
             overwrite=False,
             verbose=False,
             logfile='keptrial.log'):
    """
    keptrial -- Calculate best period and error estimate from time series

    ``keptrial`` measures the strongest period within the frequency range
    :math:`fmin` to :math:`fmax` and estimates the 1-:math:`\sigma` error
    associated with that period. The error estimate is performed by
    constructing **ntrials** new light curves from the original data provided
    in datacol and adjusting each individual data point according to a random
    number generator and a shot noise model. While a shot noise model is not
    uniformly applicable to all Kepler targets, it provides a useful
    first-order estimate for most. A power spectrum is calculated for each
    light curve using a user-specified method and the highest peak in each
    power spectrum is recorded. The distribution of peaks is fit by a normal
    function; the centroid is adopted as the best period and the
    1-:math:`\sigma` error is taken from the fitted standard deviation. A
    confidence limit is recorded as the range within which all trial periods
    fall. While this is termed a '100%' confidence limit, it refers only to
    the total number of trials rather than to a formal statistical confidence.

    The larger the number of **ntrials**, the more robust the result, but the
    values of **nfreq** and **ntrials** have to be chosen carefully to avoid
    excessive run times, and the values of **fmin**, **fmax** and **nfreq**
    have to be chosen carefully in order to provide a sensible measure of
    period and error. It is recommended that ``kepft`` be used to estimate the
    period and error before attempting ``keptrial``. Some trial and error will
    most likely be needed to find a permutation of :math:`fmin`, :math:`fmax`
    and :math:`nfreq` that resolves the period distribution over a significant
    number of frequency bins. If requested, the distribution and normal fit
    are plotted; the plot updates after every trial iteration, partly to
    relieve boredom, and partly so the user can assess whether they are using
    a sensible permutation of input parameters.

    Parameters
    ----------
    infile : str
        The name of a MAST standard format FITS file containing a Kepler light
        curve within the first data extension.
    outfile : str
        The name of the output FITS file with a new extension containing the
        results of a Monte Carlo period analysis.
    datacol : str
        The column name containing data stored within extension 1 of infile.
        This data is the input data for a series of Fourier transform
        calculations. Typically this name is SAP_FLUX (Simple Aperture
        Photometry fluxes), but any data column within extension 1 of the FITS
        file can be used provided it is coupled to an error column name using
        errcol.
    errcol : str
        The uncertainty data coupled to datacol. Typically this column is
        called SAP_FLUX_ERR.
    fmin : float [1/day]
        The minimum frequency on which each power spectrum will be calculated.
    fmax : float [1/day]
        The maximum frequency on which each power spectrum will be calculated.
    nfreq : int
        The number of uniform frequency steps between fmin and fmax over which
        the power spectrum will be calculated.
    method : str
        Choose a method for calculating the power spectrum. Currently, only
        'ft', a discrete Fourier transform, is available.
    ntrials : int
        The number of Monte Carlo trials required before calculating the best
        periods, period uncertainty and confidence in the measurement.
    plot : bool
        Plot the trial period distribution and the fitted normal function?
    overwrite : bool
        Overwrite the output file?
    verbose : bool
        Print informative messages and warnings to the shell and logfile?
    logfile : str
        Name of the logfile containing error and warning messages.
    """
    # startup parameters
    labelsize = 24
    ticksize = 16
    xsize = 18
    ysize = 6
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    # log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = ('KEPTRIAL -- ' + ' infile={}'.format(infile) +
            ' outfile={}'.format(outfile) + ' datacol={}'.format(datacol) +
            ' errcol={}'.format(errcol) + ' fmin={}'.format(fmin) +
            ' fmax={}'.format(fmax) + ' nfreq={}'.format(nfreq) +
            ' method={}'.format(method) + ' ntrials={}'.format(ntrials) +
            ' plot={}'.format(plot) + ' overwrite={}'.format(overwrite) +
            ' verbose={}'.format(verbose) + ' logfile={}'.format(logfile))

    kepmsg.log(logfile, call + '\n', verbose)

    # start time
    kepmsg.clock('KEPTRIAL started at', logfile, verbose)
    # overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        errmsg = 'ERROR -- KEPTRIAL: {} exists. Use --overwrite'.format(
            outfile)
        kepmsg.err(logfile, errmsg, verbose)
    # open input file
    instr = pyfits.open(infile, 'readonly')
    # fudge non-compliant FITS keywords with no values
    instr = kepkey.emptykeys(instr, infile, logfile, verbose)
    # input data
    try:
        barytime = instr[1].data.field('barytime')
    except KeyError:
        barytime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
                                     verbose)
    signal = kepio.readfitscol(infile, instr[1].data, datacol, logfile,
                               verbose)
    err = kepio.readfitscol(infile, instr[1].data, errcol, logfile, verbose)
    # remove infinite data from time series
    try:
        nanclean = instr[1].header['NANCLEAN']
    except KeyError:
        incols = [barytime, signal, err]
        [barytime, signal, err] = kepstat.removeinfinlc(signal, incols)
    # frequency steps and Monte Carlo iterations
    deltaf = (fmax - fmin) / nfreq
    freq, pmax, trial = [], [], []
    for i in tqdm(range(ntrials)):
        trial.append(i + 1)
        # adjust data within the error bars
        work1 = kepstat.randarray(signal, err)
        # determine FT power
        fr, power = kepfourier.ft(barytime, work1, fmin, fmax, deltaf, False)
        # determine peak in FT
        pmax.append(-1.0e30)
        for j in range(len(fr)):
            if (power[j] > pmax[-1]):
                pmax[-1] = power[j]
                f1 = fr[j]
        freq.append(f1)
    # plot stop-motion histogram
    plt.figure()
    plt.clf()
    plt.axes([0.08, 0.08, 0.88, 0.89])
    plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
    plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
    n, bins, patches = plt.hist(freq,
                                bins=nfreq,
                                range=[fmin, fmax],
                                align='mid',
                                rwidth=1,
                                ec='#0000ff',
                                fc='#ffff00',
                                lw=2)
    # fit normal distribution to histogram
    x = np.zeros(len(bins))
    for j in range(1, len(bins)):
        x[j] = (bins[j] + bins[j - 1]) / 2
    # initial guesses: amplitude ~ number of trials, centre at the last peak
    # frequency, width ~ one frequency step
    pinit = np.array([float(ntrials), freq[-1], deltaf])
    n = np.array(n, dtype='float32')
    coeffs, errors, covar, sigma, chi2, dof, fit, plotx, ploty = \
            kepfit.leastsquares(kepfunc.gauss, pinit, x[1:], n, None,
                                logfile, verbose)
    f = np.arange(fmin, fmax, (fmax - fmin) / 100)
    fit = kepfunc.gauss(coeffs, f)
    plt.plot(f, fit, 'r-', linewidth=2)
    plt.xlabel(r'Frequency (1/d)', {'color': 'k'})
    plt.ylabel('N', {'color': 'k'})
    plt.xlim(fmin, fmax)
    plt.grid()
    # render plot
    if plot:
        plt.show()
    # period results
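    # first-order error propagation: p = 1 / f, so sigma_p = p * sigma_f / f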
    p = 1.0 / coeffs[1]
    perr = p * coeffs[2] / coeffs[1]
    f1 = fmin
    f2 = fmax
    # confidence range: first and last histogram bins containing any trials
    for i in range(len(n)):
        if n[i] > 0:
            f1 = bins[i]
            break
    for i in range(len(n) - 1, -1, -1):
        if n[i] > 0:
            f2 = bins[i + 1]
            break
    powave, powstdev = np.mean(pmax), np.std(pmax)

    # print result
    print('              best period: %.10f days (%.7f min)' % (p, p * 1440.0))
    print('     1-sigma period error: %.10f days (%.7f min)' %
          (perr, perr * 1440.0))
    print('             search range: %.10f - %.10f days  ' %
          (1.0 / fmax, 1.0 / fmin))
    print('    100%% confidence range: %.10f - %.10f days  ' %
          (1.0 / f2, 1.0 / f1))
    print('         number of trials: %d' % ntrials)
    print(' number of frequency bins: %d' % nfreq)

    # history keyword in output file
    kepkey.history(call, instr[0], outfile, logfile, verbose)

    ## write output file
    col1 = pyfits.Column(name='TRIAL', format='J', array=trial)
    col2 = pyfits.Column(name='FREQUENCY',
                         format='E',
                         unit='1/day',
                         array=freq)
    col3 = pyfits.Column(name='POWER', format='E', array=pmax)
    cols = pyfits.ColDefs([col1, col2, col3])
    instr.append(pyfits.BinTableHDU.from_columns(cols))
    # append result keywords to the header of the new extension
    header_cards = [
        ('EXTNAME', 'TRIALS', 'Extension name'),
        ('SEARCHR1', 1.0 / fmax, 'Search range lower bound (days)'),
        ('SEARCHR2', 1.0 / fmin, 'Search range upper bound (days)'),
        ('NFREQ', nfreq, 'Number of frequency bins'),
        ('PERIOD', p, 'Best period (days)'),
        ('PERIODE', perr, '1-sigma period error (days)'),
        ('CONFIDR1', 1.0 / f2, 'Trial confidence lower bound (days)'),
        ('CONFIDR2', 1.0 / f1, 'Trial confidence upper bound (days)'),
        ('NTRIALS', ntrials, 'Number of trials'),
    ]
    for keyword, value, comment in header_cards:
        try:
            instr[-1].header[keyword] = (value, comment)
        except Exception:
            raise KeyError("Could not write {} to the header of the output"
                           " file".format(keyword))
    instr.writeto(outfile)
    # close input file
    instr.close()
    ## end time
    kepmsg.clock('KEPTRIAL completed at', logfile, verbose)
Example #25
0
plt.ylabel('r^2 xi(r)')
plt.grid(True, which='both')
plt.savefig(save_location+'xir2_v2_{}_{}_{}.pdf'.format(pixels[0],pixels[-1],basedir[-7:-2]))

plt.figure()
plt.errorbar(R_binned,xi*(R_binned**2),yerr=err*(R_binned**2),fmt='o')
#plt.plot(R_binned,(mean*R_binned**2))
plt.xlabel('r [Mpc/h]')
plt.ylabel('r^2 xi(r)')
plt.grid(True, which='both')
plt.savefig(save_location+'xir2_v2_{}_{}_errors_{}.pdf'.format(pixels[0],pixels[-1],basedir[-7:-2]))

file_data_list = []
for result in results:
    file_data_list += [(result[0],result[1],result[2],result[3])]

dtype = [('bin_n', '>f4'), ('N_contributions_chunk', '>f4'), ('xi_chunk', '>f4'), ('del_squared_chunk', '>f4')]
file_data = np.array(file_data_list,dtype=dtype)

prihdr = fits.Header()
prihdu = fits.PrimaryHDU(header=prihdr)
cols_xi = fits.ColDefs(file_data)
hdu_xi = fits.BinTableHDU.from_columns(cols_xi,name='XI DATA')

hdulist = fits.HDUList([prihdu, hdu_xi])
hdulist.writeto(save_location+'xi_data_v2_{}_{}_{}.fits'.format(pixels[0],pixels[-1],basedir[-7:-2]))
hdulist.close()

print('files saved to {}!'.format(save_location))
#plt.show()
Example #26
0
                          array=hdulist[1].data.field('ra_error')*mas_to_deg),
            pyfits.Column(name='dec_error', format='D', unit='Angle[deg]',
                          array=hdulist[1].data.field('dec_error')*mas_to_deg),

            pyfits.Column(name='phot_g_mean_mag', format='D', unit='Magnitude[mag]',
                          array=hdulist[1].data.field('phot_g_mean_mag')),
            pyfits.Column(name='phot_g_mean_mag_error', format='D', unit='Magnitude[mag]',
                          array=mag_error),
        ]
        # for col in col_names:
        #     for tbcol in hdulist[1].columns:
        #         if (tbcol.name == col):
        #             columns.append(pyfits.Column(name=col,
        #                                          format=tbcol.format,
        #                                          disp=tbcol.disp,
        #                                          unit=tbcol.unit,
        #                                          array=hdulist[1].data.field(col)))
        #
        # columns.append(pyfits.Column(
        #     name='phot_g_mean_mag_error', array=mag_error, unit='Magnitude[mag]', format='D'))

        coldefs = pyfits.ColDefs(columns)
        tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
        tbhdu.name = 'GAIA_CAT'

        # assemble the output FITS file
        # copy primary extension
        out_list = [hdulist[0], tbhdu]

        out_hdulist = pyfits.HDUList(out_list)
        out_hdulist.writeto(out_fn, overwrite=True)
Example #27
0
    intens_sum = intens_photo + intens_out
    ratio = intens_photo / intens_sum
    #ratio = 1 ###
    wlen_expand = wlen * 1e-6

    niu = const.c / wlen_expand

    ncflux = recal(intens_sum, cflux, niu, ctemp)
    comb_photo = ncflux * ratio / factor_photo
    comb_out = ncflux * (1 - ratio) / factor_out
    comb = comb_photo + comb_out
    comb = comb * np.e**(-obs_tau / loca)
    comb = comb + gggflux
    #comb = comb_photo + ngflux ###
    fspec = np.concatenate((fspec, comb))
    print(i)

fspec = fspec[1:]

col1 = fits.Column(name='GRAMS', format='I', array=gram)
col2 = fits.Column(name='C2H2', format='I', array=c2h2)
col4 = fits.Column(name='Fspec', format='365E', array=fspec)

cols = fits.ColDefs([col1, col2, col4])

tbhdu = fits.BinTableHDU.from_columns(cols)

tbhdu.writeto('/arrays/igloo1/ssp201701/fits/2012/9100.fits')
print('end')
Example #28
0
def calc(inputDataDict, outfile, indir="simha/", lib="miles"):
    from scipy.stats import chi2
    import CosmologicalDistance
    import os
    import logging

    logging.debug('Starting smass.calc()')

    cd = CosmologicalDistance.CosmologicalDistance()
    ldistDict = dict()
    splineDict = dict()
    splines, zmet = loadPopColors.doAll(indir, lib=lib)
    id = inputDataDict["id"]
    haloid = inputDataDict["haloid"]
    i = inputDataDict["i"]
    ierr = inputDataDict["ierr"]
    gr = inputDataDict["gr"]
    ri = inputDataDict["ri"]
    iz = inputDataDict["iz"]
    grerr = inputDataDict["grerr"]
    rierr = inputDataDict["rierr"]
    izerr = inputDataDict["izerr"]
    allzed = inputDataDict["zed"]
    GR_P_COLOR = inputDataDict["GR_P_COLOR"]
    RI_P_COLOR = inputDataDict["RI_P_COLOR"]
    IZ_P_COLOR = inputDataDict["IZ_P_COLOR"]
    P_RADIAL = inputDataDict["P_RADIAL"]
    P_REDSHIFT = inputDataDict["P_REDSHIFT"]
    P_MEMBER = inputDataDict["P_MEMBER"]
    DIST_TO_CENTER = inputDataDict["DIST_TO_CENTER"]
    GRP_RED = inputDataDict["GRP_RED"]
    GRP_BLUE = inputDataDict["GRP_BLUE"]
    RIP_RED = inputDataDict["RIP_RED"]
    RIP_BLUE = inputDataDict["RIP_BLUE"]
    IZP_RED = inputDataDict["IZP_RED"]
    IZP_BLUE = inputDataDict["IZP_BLUE"]

    #print zmet
    # guard against unrealistically small (or zero) errors: floor at 0.02 mag
    ix = np.nonzero(grerr < 0.02)
    grerr[ix] = 0.02
    ix = np.nonzero(rierr < 0.02)
    rierr[ix] = 0.02
    ix = np.nonzero(izerr < 0.02)
    izerr[ix] = 0.02

    # prepping for output
    out_id,out_haloid, out_gr, out_stdgr, out_gi, out_stdgi, \
        out_kri, out_stdkri, out_kii, out_stdkii, out_iobs, out_distmod, \
        out_bestzmet,out_stdzmet, out_rabs, out_iabs, out_mass_gr, out_mass_gi, out_mass, out_stdmass, out_sfr, out_stdsfr, out_age, out_stdage, \
        out_GR_P_COLOR,out_RI_P_COLOR,out_IZ_P_COLOR,out_P_RADIAL,out_P_REDSHIFT,out_P_MEMBER,out_DIST_TO_CENTER, \
        out_GRP_RED,out_GRP_BLUE,out_RIP_RED,out_RIP_BLUE,out_IZP_RED,out_IZP_BLUE,out_zmet, out_zed =\
        [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
    out_bestsp, out_bestchisq = [], []

    size = id.size
    #size = 10
    for galaxy in range(0, size):
        zed = allzed[galaxy]

        logging.debug('{i} of {len}, z = {z}'.format(i=galaxy + 1,
                                                     len=size,
                                                     z=zed))

        rest_gr, rest_gi, weight, chisqs = [], [], [], []
        masslight, sfrs, ages, zmets, kii, kri = [], [], [], [], [], []
        minChiSq = 999
        spIndex = -1
        for sp in range(0, len(splines)):
            # for speed
            skey = str(sp) + "-" + str(zed)
            #if skey in splineDict :
            #    sgr,sri,siz,sgrr,sgir,skii,skri,sml = splineDict[skey]
            #else :
            #    sgr = splines[sp][0](zed)
            #    sri = splines[sp][1](zed)
            #    siz = splines[sp][2](zed)
            #    sgrr = splines[sp][4](zed) ;# restframe g-r
            #    sgir = splines[sp][5](zed) ;# restframe g-i
            #    skii = splines[sp][6](zed) ;# kcorrection: i_o - i_obs
            #    skri = splines[sp][7](zed) ;# kcorrection: r_o - i_obs
            #    sml = splines[sp][8](zed) ;# log(mass/light)  (M_sun/L_sun)
            #    ssfr = splines[sp][9](zed)
            #    sage_cosmic = splines[sp][10](zed)
            #    sage = splines[sp][11](zed)
            #    zmet = splines[12]
            sgr = splines[sp][0](zed)
            sri = splines[sp][1](zed)
            siz = splines[sp][2](zed)
            sgrr = splines[sp][4](zed)
            # restframe g-r
            sgir = splines[sp][5](zed)
            # restframe g-i
            skii = splines[sp][6](zed)
            # kcorrection: i_o - i_obs
            skri = splines[sp][7](zed)
            # kcorrection: r_o - i_obs
            sml = splines[sp][8](zed)
            # log(mass/light)  (M_sun/L_sun)
            ssfr = splines[sp][9](zed)
            if (ssfr < -20.): ssfr = -20.
            sage_cosmic = splines[sp][10](zed)
            sage = splines[sp][11](zed)
            szmet = zmet[sp]
            #To be changed if SFH changes

            #splineDict[skey] = sgr,sri,siz,sgrr,sgir,skii,skri,sml

            gre = grerr[galaxy]
            rie = rierr[galaxy]
            ize = izerr[galaxy]
            gr_chisq = pow((gr[galaxy] - sgr) / gre, 2)
            ri_chisq = pow((ri[galaxy] - sri) / rie, 2)
            iz_chisq = pow((iz[galaxy] - siz) / ize, 2)
            rest_gr.append(sgrr)
            rest_gi.append(sgir)
            kii.append(skii)
            kri.append(skri)
            masslight.append(sml)
            sfrs.append(ssfr)
            ages.append(sage)
            zmets.append(szmet)
            chisq = gr_chisq + ri_chisq + iz_chisq
            probability = 1 - chi2.cdf(chisq, 3 - 1)
            # probability of chisq greater than this
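            # (equivalent to the survival function, chi2.sf(chisq, 3 - 1))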
            weight.append(probability)
            chisqs.append(chisq)
        spIndex = np.argmax(weight)
        rest_gr = np.array(rest_gr)
        rest_gi = np.array(rest_gi)
        kii = np.array(kii)
        kri = np.array(kri)
        masslight = np.array(masslight)
        sfrs = np.array(sfrs)
        idx_sfr = (sfrs < -8.)
        ages = np.array(ages)
        weight = np.array(weight)
        gr_weighted = rest_gr * weight
        gi_weighted = rest_gi * weight
        kii_weighted = kii * weight
        kri_weighted = kri * weight
        #weight_norm = weight/np.sum(weight)
        masslight_weighted = masslight * weight
        sfr_weighted = 10**sfrs[idx_sfr] * weight[idx_sfr]
        age_weighted = ages * weight
        zmet_weighted = zmets * weight
        w1 = weight.sum()
        w2 = (weight**2).sum()
        if w1 == 0: w1 = 1e-10
        if w2 == 0: w2 = 1e-10
        mean_gr = gr_weighted.sum() / w1
        mean_gi = gi_weighted.sum() / w1
        mean_kii = kii_weighted.sum() / w1
        mean_kri = kri_weighted.sum() / w1
        mean_masslight = masslight_weighted.sum() / w1
        #try :
        #    if weight.shape[0]>1.:
        #        mean_sfr = float(ws.numpy_weighted_median(sfrs, weights=weight)) #np.median(sfr_weighted) #sfr_weighted.sum()/w1
        #    else:
        #        mean_sfr = -70.
        #except :
        #    mean_sfr = -70.
        #print mean_sfr
        mean_age = age_weighted.sum() / w1
        mean_zmet = zmet_weighted.sum() / w1
        mean_sfr = sfr_weighted.sum() / w1
        # unbiased weighted estimator of the sample variance
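        # var_x = (W1 / (W1**2 - W2)) * sum_i w_i * (x_i - mean_x)**2,
        # with W1 = sum_i w_i and W2 = sum_i w_i**2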
        w3 = w1**2 - w2
        if w3 == 0: w3 = 1e-10
        var_gr = (w1 / w3) * (weight * (rest_gr - mean_gr)**2).sum()
        var_gi = (w1 / w3) * (weight * (rest_gi - mean_gi)**2).sum()
        var_kii = (w1 / w3) * (weight * (kii - mean_kii)**2).sum()
        var_kri = (w1 / w3) * (weight * (kri - mean_kri)**2).sum()
        var_masslight = (w1 / w3) * (weight *
                                     (masslight - mean_masslight)**2).sum()
        var_sfr = (w1 / w3) * (weight * (sfrs - mean_sfr)**2).sum()
        var_age = (w1 / w3) * (weight * (ages - mean_age)**2).sum()
        var_zmet = (w1 / w3) * (weight * (zmets - mean_zmet)**2).sum()
        std_gr = var_gr**0.5
        std_gi = var_gi**0.5
        std_kii = var_kii**0.5
        std_kri = var_kri**0.5
        std_masslight = var_masslight**0.5
        std_sfr = var_sfr**0.5
        std_age = var_age**0.5
        std_zmet = var_zmet**0.5
        if std_gr > 99.99: std_gr = 99.99
        if std_gi > 99.99: std_gi = 99.99
        if std_kii > 99.99: std_kii = 99.99
        if std_kri > 99.99: std_kri = 99.99
        if std_sfr > 99.99: std_sfr = 99.99
        if std_age > 99.99: std_age = 99.99
        if std_masslight > 99.99: std_masslight = 99.99
        if std_zmet > 99.99: std_zmet = 99.99
        # Comment -distanceModulus out for fsps versions <2.5, as their mags don't include distance modulus
        if zed in ldistDict:
            lumdist = ldistDict[zed]
        else:
            lumdist = cd.luminosity_distance(zed)
            # in Mpc
            ldistDict[zed] = lumdist
        distanceModulus = 5 * np.log10(lumdist * 1e6 / 10.)
        iabs = i[galaxy] + mean_kii - distanceModulus
        rabs = i[galaxy] + mean_kri - distanceModulus
        taMass = taylorMass(mean_gi, iabs)
        mcMass = mcintoshMass(mean_gr, rabs)
        fsMass = fspsMass(mean_masslight, iabs)
        # JTA: to make purely distance modulus
        #iabs = i[galaxy] - distanceModulus
        #fsMass = gstarMass( iabs )

        # saving for output
        out_id.append(id[galaxy])
        out_haloid.append(haloid[galaxy])
        out_gr.append(mean_gr)
        out_stdgr.append(std_gr)
        out_gi.append(mean_gi)
        out_stdgi.append(std_gi)
        out_kii.append(mean_kii)
        out_stdkii.append(std_kii)
        out_kri.append(mean_kri)
        out_stdkri.append(std_kri)
        out_iobs.append(i[galaxy])
        out_distmod.append(distanceModulus)
        out_iabs.append(iabs)
        out_rabs.append(rabs)
        out_mass_gr.append(mcMass)
        out_mass_gi.append(taMass)
        out_mass.append(fsMass)
        out_stdmass.append(std_masslight)
        out_bestsp.append(spIndex)
        out_bestzmet.append(zmets[spIndex])
        out_bestchisq.append(chisqs[spIndex])
        out_sfr.append(mean_sfr)
        out_stdsfr.append(std_sfr)
        out_age.append(mean_age)
        out_stdage.append(std_age)
        out_zmet.append(mean_zmet)
        out_stdzmet.append(std_zmet)
        out_zed.append(allzed[galaxy])
        out_GR_P_COLOR.append(GR_P_COLOR[galaxy])
        out_RI_P_COLOR.append(RI_P_COLOR[galaxy])
        out_IZ_P_COLOR.append(IZ_P_COLOR[galaxy])
        out_P_RADIAL.append(P_RADIAL[galaxy])
        out_P_REDSHIFT.append(P_REDSHIFT[galaxy])
        out_P_MEMBER.append(P_MEMBER[galaxy])
        out_DIST_TO_CENTER.append(DIST_TO_CENTER[galaxy])
        out_GRP_RED.append(GRP_RED[galaxy])
        out_GRP_BLUE.append(GRP_BLUE[galaxy])
        out_RIP_RED.append(RIP_RED[galaxy])
        out_RIP_BLUE.append(RIP_BLUE[galaxy])
        out_IZP_RED.append(IZP_RED[galaxy])
        out_IZP_BLUE.append(IZP_BLUE[galaxy])

    out_id = np.array(out_id).astype(int)
    out_haloid = np.array(out_haloid).astype(int)
    out_gr = np.array(out_gr)
    out_stdgr = np.array(out_stdgr)
    out_gi = np.array(out_gi)
    out_stdgi = np.array(out_stdgi)
    out_kii = np.array(out_kii)
    out_stdkii = np.array(out_stdkii)
    out_kri = np.array(out_kri)
    out_stdkri = np.array(out_stdkri)
    out_iobs = np.array(out_iobs)
    out_distmod = np.array(out_distmod)
    out_iabs = np.array(out_iabs)
    out_rabs = np.array(out_rabs)
    out_mass_gr = np.array(out_mass_gr)
    out_mass_gi = np.array(out_mass_gi)
    out_mass = np.array(out_mass)
    out_stdmass = np.array(out_stdmass)
    out_sfr = np.array(out_sfr)
    out_stdsfr = np.array(out_stdsfr)
    out_age = np.array(out_age)
    out_stdage = np.array(out_stdage)
    out_bestsp = np.array(out_bestsp)
    out_GR_P_COLOR = np.array(out_GR_P_COLOR)
    out_RI_P_COLOR = np.array(out_RI_P_COLOR)
    out_IZ_P_COLOR = np.array(out_IZ_P_COLOR)
    out_P_RADIAL = np.array(out_P_RADIAL)
    out_P_REDSHIFT = np.array(out_P_REDSHIFT)
    out_P_MEMBER = np.array(out_P_MEMBER)
    out_DIST_TO_CENTER = np.array(out_DIST_TO_CENTER)
    out_GRP_RED = np.array(out_GRP_RED)
    out_GRP_BLUE = np.array(out_GRP_BLUE)
    out_RIP_RED = np.array(out_RIP_RED)
    out_RIP_BLUE = np.array(out_RIP_BLUE)
    out_IZP_RED = np.array(out_IZP_RED)
    out_IZP_BLUE = np.array(out_IZP_BLUE)
    out_zmet = np.array(out_zmet)
    out_bestzmet = np.array(out_bestzmet)
    out_zed = np.array(out_zed)
    out_bestchisq = np.array(out_bestchisq)

    col1 = pf.Column(name='MEM_MATCH_ID', format='J', array=out_haloid)
    col2 = pf.Column(name='Z', format='E', array=out_zed)
    col3 = pf.Column(name='ID', format='K', array=out_id)
    col4 = pf.Column(name='gr_o', format='E', array=out_gr)
    col5 = pf.Column(name='gr_o_err', format='E', array=out_stdgr)
    col6 = pf.Column(name='gi_o', format='E', array=out_gi)
    col7 = pf.Column(name='gi_o_err', format='E', array=out_stdgi)
    col8 = pf.Column(name='kri', format='E', array=out_kri)
    col9 = pf.Column(name='kri_err', format='E', array=out_stdkri)
    col10 = pf.Column(name='kii', format='E', array=out_kii)
    col11 = pf.Column(name='kii_err', format='E', array=out_stdkii)
    col12 = pf.Column(name='iobs', format='E', array=out_iobs)
    col13 = pf.Column(name='distmod', format='E', array=out_distmod)
    col14 = pf.Column(name='rabs', format='E', array=out_rabs)
    col15 = pf.Column(name='iabs', format='E', array=out_iabs)
    col16 = pf.Column(name='mcMass', format='E', array=out_mass_gr)
    col17 = pf.Column(name='taMass', format='E', array=out_mass_gi)
    col18 = pf.Column(name='mass', format='E', array=out_mass)
    col19 = pf.Column(name='mass_err', format='E', array=out_stdmass)
    col20 = pf.Column(name='ssfr', format='E', array=out_sfr)
    col21 = pf.Column(name='ssfr_std', format='E', array=out_stdsfr)
    col22 = pf.Column(name='mass_weight_age', format='E', array=out_age)
    col23 = pf.Column(name='mass_weight_age_err', format='E', array=out_stdage)
    col24 = pf.Column(name='best_model', format='E', array=out_bestsp)
    col25 = pf.Column(name='best_zmet', format='E', array=out_bestzmet)
    col26 = pf.Column(name='zmet', format='E', array=out_zmet)
    col27 = pf.Column(name='best_chisq', format='E', array=out_bestchisq)
    col28 = pf.Column(name='GR_P_COLOR', format='E', array=out_GR_P_COLOR)
    col29 = pf.Column(name='RI_P_COLOR', format='E', array=out_RI_P_COLOR)
    col30 = pf.Column(name='IZ_P_COLOR', format='E', array=out_IZ_P_COLOR)
    col31 = pf.Column(name='P_RADIAL', format='E', array=out_P_RADIAL)
    col32 = pf.Column(name='P_REDSHIFT', format='E', array=out_P_REDSHIFT)
    col33 = pf.Column(name='P_MEMBER', format='E', array=out_P_MEMBER)
    col34 = pf.Column(name='DIST_TO_CENTER',
                      format='E',
                      array=out_DIST_TO_CENTER)
    col35 = pf.Column(name='GRP_RED', format='E', array=out_GRP_RED)
    col36 = pf.Column(name='GRP_BLUE', format='E', array=out_GRP_BLUE)
    col37 = pf.Column(name='RIP_RED', format='E', array=out_RIP_RED)
    col38 = pf.Column(name='RIP_BLUE', format='E', array=out_RIP_BLUE)
    col39 = pf.Column(name='IZP_RED', format='E', array=out_IZP_RED)
    col40 = pf.Column(name='IZP_BLUE', format='E', array=out_IZP_BLUE)

    cols = pf.ColDefs([
        col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11,
        col12, col13, col14, col15, col16, col17, col18, col19, col20, col21,
        col22, col23, col24, col25, col26, col27, col28, col29, col30, col31,
        col32, col33, col34, col35, col36, col37, col38, col39, col40
    ])
    tbhdu = pf.BinTableHDU.from_columns(cols)
    tbhdu.writeto(outfile, clobber=True)

    data = np.array([out_id, out_haloid, out_gr, out_stdgr, out_gi, out_stdgi, \
        out_kri, out_stdkri, out_kii, out_stdkii, out_iobs, out_distmod, \
        out_rabs, out_iabs, out_mass_gr, out_mass_gi, out_mass, out_stdmass, \
        out_zed,out_sfr, out_stdsfr, out_age, out_stdage, out_bestsp,out_bestzmet,out_zmet,out_GR_P_COLOR,out_RI_P_COLOR,out_IZ_P_COLOR,out_P_RADIAL,out_P_REDSHIFT,out_P_MEMBER,out_DIST_TO_CENTER, \
        out_GRP_RED,out_GRP_BLUE,out_RIP_RED,out_RIP_BLUE,out_IZP_RED,out_IZP_BLUE])
    #header = "# id, haloid, gr_o,  std,  gi_o, std, kri, std, kii, std, i,  distmod, "
    #header = header + "r_abs, i_abs, mcMass, taMass, mass, std, zed, sfr, sfrstd, age, agestd, bestsp,best_zmet,mean_zmet \
    #     out_GR_P_COLOR,out_RI_P_COLOR,out_IZ_P_COLOR,out_GR_P_MEMBER,out_RI_P_MEMBER,out_IZ_P_MEMBER,out_DIST_TO_CENTER, \
    #    out_GRP_RED,out_GRP_BLUE,out_RIP_RED,out_RIP_BLUE,out_IZP_RED,out_IZP_BLUE\n"
    #fd = open(outfile,"w")
    #fd.write(header)
    #fd.close()
    #np.savetxt(outfile+".dat", data.T, "%d,%d,%6.3f,%6.4f,%6.3f,%6.4f,%6.3f,%6.4f,%6.3f,%6.4f,\
    #     %6.3f,%6.3f,%6.3f,%6.3f,%6.3f,%6.3f,%6.3f,%6.3f,%6.3f,%6.4f,%6.3f,%6.3f,%6.3f,%d,%6.3f, %6.3f,%6.3f,%6.3f,%6.3f,%6.3f,%6.3f,%6.3f,%6.4f,%6.3f,%6.3f,%6.3f,%6.3f,%6.3f, %6.3f")
    #os.system("cat {} >> {}; rm {}".format(outfile+".dat", outfile, outfile+".dat"))

    logging.debug('Returning from smass.calc()')
Example #29
0
def tsys_writeudbfits(xdat, calflag):
    ''' This takes the dictionary xdat of rd_miriad_tsys_file and
    creates a UDB FITS file. Unlike the original UDB fits files this
    puts the tsys data as the primary output because I cannot figure
    out how to get a multidimensional array into a column...'''
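    # For reference, a multidimensional array can be stored in a binary
    # table column via the TDIM convention, e.g. (hypothetical shapes):
    #     fits.Column(name='tsys', format='64E', dim='(8, 8)',
    #                 array=tpwr.reshape(-1, 8, 8))
    # but the primary-HDU layout below is retained.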
    file_out = ''

    if xdat is None or len(xdat) == 0:
        print('tsys_writeudbfits: No data input')
        return file_out
#   end

#UDB fits files
    udbfitsdir = '/data1/eovsa/fits/'

    #create a filename
    file0 = os.path.basename(xdat['file0'])
    print(file0)
    yr = file0[3:7]
    yr2 = file0[5:7]
    mn = file0[7:9]
    dy = file0[9:11]
    hh = file0[11:13]
    mm = file0[13:15]
    ss = file0[15:]
    file_out = 'eovsa_1-18GHz_sp_' + yr + mn + dy + '_' + hh + mm + ss + '.fts'
    #add directory
    outdir = udbfitsdir + yr + mn + dy
    if not os.path.isdir(outdir):
        print("tsys_writeudbfits: creating " + outdir)
        os.mkdir(outdir)
    #end if
    file_out = outdir + '/' + file_out

    date_obs = yr + '-' + mn + '-' + dy + 'T' + hh + ':' + mm + ':' + ss + '.000'
    #Convert to unix time, and add seconds to get date_end
    #http://astropy.readthedocs.org/en/latest/time
    ut = xdat['ut_mjd']
    print "date_obs: ", date_obs
    t0 = Time(date_obs)
    dt = int(86400 * (max(ut) - min(ut)))
    t1 = Time(t0.unix + dt, format='unix')
    date_end = t1.isot

    # Create the primary header
    version = xdat['version']
    if version == "1.0":
        tpwr = xdat['tsys']
    else:
        tpwr = xdat['tpwr']
    #endif
    hdu = fits.PrimaryHDU(tpwr)

    # Set up the extensions: sfreq, sdf, ut
    sfreq = xdat['sfreq']
    col1 = fits.Column(name='sfreq', format='E', array=sfreq)
    cols1 = fits.ColDefs([col1])
    tbhdu1 = fits.BinTableHDU.from_columns(cols1)
    #    tbhdu1.update_ext_name('SFREQ')
    tbhdu1.name = 'SFREQ'
    sdf = xdat['sdf']
    col2 = fits.Column(name='sdf', format='E', array=sdf)
    cols2 = fits.ColDefs([col2])
    tbhdu2 = fits.BinTableHDU.from_columns(cols2)
    tbhdu2.name = 'SDF'
    #    ut = xdat['ut']
    # Split up mjd into days and msec, really intuitive syntax, thanks python
    ut_int = ut.astype(np.int32)
    ut_msec = 1000.0 * 86400.0 * (ut - ut_int)
    ut_ms1 = ut_msec.astype(np.int32)
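    # (the original fractional MJD can be recovered as
    #  ut_int + ut_ms1 / 86400000.0)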

    # J is the format code for a 32 bit integer, who would have thought
    # http://astropy.readthedocs.org/en/latest/io/fits/usage/table.html
    col3 = fits.Column(name='mjd', format='J', array=ut_int)
    col4 = fits.Column(name='time', format='J', array=ut_ms1)

    cols3 = fits.ColDefs([col3, col4])
    tbhdu3 = fits.BinTableHDU.from_columns(cols3)
    tbhdu3.name = 'UT'

    #create an HDUList object to put in header information
    hdulist = fits.HDUList([hdu, tbhdu1, tbhdu2, tbhdu3])

    #primary header
    prihdr = hdulist[0].header
    #Header information, strip last character from strings
    obj_id = xdat['source'][:-1]
    scan_id = xdat['scanid'][:-1]
    proj_id = strip_non_printable(xdat['proj'][:-1])
    ant_list = xdat['antennalist'][:-1]

    temp_out = os.path.basename(file_out)
    prihdr.set('FILENAME', temp_out)
    prihdr.set('ORIGIN', 'NJIT', 'Institute where file was written')
    prihdr.set('TELESCOP', 'EOVSA', 'Expanded Owens Valley Solar Array')
    prihdr.set('OBJ_ID', obj_id, 'Object ID')
    prihdr.set('SCAN_ID', scan_id, 'Scan ID for this dataset')
    prihdr.set('PROJECT_', proj_id, 'EOVSA Project ID')
    prihdr.set('ID', int(yr2 + mn + dy + hh + mm + ss),
               'Catalog ID, yymmddhhmm')
    prihdr.set('TYPE', 1, 'Spectrum')
    prihdr.set('DATE_OBS', date_obs, 'Start date/time of observation')
    prihdr.set('DATE_END', date_end, 'End date/time of observation')
    prihdr.set('FREQMIN', min(sfreq), 'Min freq in observation (GHz)')
    prihdr.set('FREQMAX', max(sfreq), 'Max freq in observation (GHz)')
    prihdr.set('XCEN', 0.0, 'Antenna pointing in arcsec from Sun centre')
    prihdr.set('YCEN', 0.0, 'Antenna pointing in arcsec from Sun centre')
    prihdr.set('POLARIZA', 'XX, YY', 'Polarizations present')
    prihdr.set('RESOLUTI', 0.0, 'Resolution value')
    prihdr.set('NANTS', xdat['nants'], 'Number of Antennae')
    prihdr.set('ANTENNA', ant_list, 'Used antennae')
    prihdr.set('VERSION', version, 'SW version')
    prihdr.set('CAL_FLAG', 1 if calflag else 0,
               'Calibration Flag: 1 for calibrated, 0 for not')
# Write the file
    hdulist.writeto(file_out, clobber=True)

    return file_out
Example #30
0
def daily_xsp_writefits(xdat, pdata):
    '''This takes the dictionary output from a read_idb, and pdata from
    daily_xsp, and creates a FITS file. The data is the primary output.
    '''

    file_out = ''
    if xdat is None or len(xdat) == 0:
        print('daily_xsp_writefits: No data input')
        return file_out
#   end

#UDB allday fits files
    xspfitsdir = '/data1/eovsa/fits/XSP/'
    if not os.path.isdir(xspfitsdir):
        print("daily_xsp_writefits: creating " + xspfitsdir)
        os.mkdir(xspfitsdir)
    #end if


#create a filename, just use the start time
    t = xdat['time']
    print(t[0])
    # `format` tells the Time object how to interpret the input time
    t0 = Time(t[0], format='jd')
    t01 = t0.isot
    yr = t01[0:4]
    mn = t01[5:7]
    dy = t01[8:10]
    file_out = 'XSP' + yr + mn + dy + '.fts'
    #add directory
    outdir = xspfitsdir + '/' + yr + '/'
    if not os.path.isdir(outdir):
        print("daily_xsp_writefits: creating " + outdir)
        os.mkdir(outdir)
    #end if
    file_out = outdir + '/' + file_out

    date_obs = t01
    #Convert to unix time, and add seconds to get date_end
    #http://astropy.readthedocs.org/en/latest/time
    print "date_obs: ", date_obs
    t0 = Time(date_obs)
    dt = int(86400 * (max(t) - min(t)))
    t1 = Time(t0.unix + dt, format='unix')
    date_end = t1.isot
    print "date_end: ", date_end

    # Create the primary header
    tpwr = pdata
    hdu = fits.PrimaryHDU(tpwr)

    # Set up the extensions: sfreq, ut
    sfreq = xdat['fghz']
    col1 = fits.Column(name='sfreq', format='E', array=sfreq)
    cols1 = fits.ColDefs([col1])
    tbhdu1 = fits.BinTableHDU.from_columns(cols1)
    tbhdu1.name = 'SFREQ'

    # Split up mjd into days and msec, really intuitive syntax, thanks python
    ut = Time(t, format='jd')  #Format is for the input time
    ut = ut.mjd
    ut_int = ut.astype(np.int32)
    ut_msec = 1000.0 * 86400.0 * (ut - ut_int)
    ut_ms1 = ut_msec.astype(np.int32)

    # J is the format code for a 32 bit integer, who would have thought
    # http://astropy.readthedocs.org/en/latest/io/fits/usage/table.html
    col3 = fits.Column(name='mjd', format='J', array=ut_int)
    col4 = fits.Column(name='time', format='J', array=ut_ms1)

    cols3 = fits.ColDefs([col3, col4])
    tbhdu3 = fits.BinTableHDU.from_columns(cols3)
    tbhdu3.name = 'UT'

    #create an HDUList object to put in header information
    hdulist = fits.HDUList([hdu, tbhdu1, tbhdu3])

    #primary header
    prihdr = hdulist[0].header
    #Header information, strip last character from strings
    obj_id = strip_non_printable(xdat['source'])
    temp_out = os.path.basename(file_out)
    prihdr.set('FILENAME', temp_out)
    prihdr.set('ORIGIN', 'NJIT', 'Institute where file was written')
    prihdr.set('TELESCOP', 'EOVSA', 'Expanded Owens Valley Solar Array')
    prihdr.set('OBJ_ID', obj_id, 'Object ID')
    prihdr.set('TYPE', 1, 'Spectrum')
    prihdr.set('DATE_OBS', date_obs, 'Start date/time of observation')
    prihdr.set('DATE_END', date_end, 'End date/time of observation')
    prihdr.set('FREQMIN', min(sfreq), 'Min freq in observation (GHz)')
    prihdr.set('FREQMAX', max(sfreq), 'Max freq in observation (GHz)')
    prihdr.set('XCEN', 0.0, 'Antenna pointing in arcsec from Sun centre')
    prihdr.set('YCEN', 0.0, 'Antenna pointing in arcsec from Sun centre')
    prihdr.set('POLARIZA', 'XX, YY', 'Polarizations present')
    prihdr.set('RESOLUTI', 0.0, 'Resolution value')
    # Write the file
    hdulist.writeto(file_out, clobber=True)

    return file_out