Example #1
0
def print_par(filename, par):
    """ Write the maximum likelihood parameters and their
    uncertainties to `filename`.

    Parameters
    ----------
    filename : str
        Name of the output text file.
    par
        Unused; kept for backward compatibility with existing callers.

    Reads the fit results from the module-level `P` object
    (`names`, `ml`, `p1sig`, `p2sig`, `p1sig_joint`, `p2sig_joint`).
    """
    rec = []
    for i in range(len(P.names)):
        p = P.ml[i]
        m1 = P.p1sig[i]
        m2 = P.p2sig[i]
        j1 = P.p1sig_joint[i]
        j2 = P.p2sig_joint[i]
        # Store lower/upper offsets from the ML value: joint 1 and 2
        # sigma first, then marginalised 1 and 2 sigma.
        rec.append((P.names[i], p, p - j1[0], j1[1] - p, p - j2[0], j2[1] - p,
                    p - m1[0], m1[1] - p, p - m2[0], m2[1] - p))

    names = 'name,ml,j1l,j1u,j2l,j2u,m1l,m1u,m2l,m2u'
    rec = np.rec.fromrecords(rec, names=names)

    hd = """\
# name : parameter name
# ml   : maximum likelihood value
# j1l  : 1 sigma lower error (joint with all other parameters) 
# j1u  : 1 sigma upper error (joint)
# j2l  : 2 sigma lower error (joint) 
# j2u  : 2 sigma upper error (joint) 
# m1l  : 1 sigma lower error (marginalised over all other parameters)
# m1u  : 1 sigma upper error (marginalised)
# m2l  : 2 sigma lower error (marginalised) 
# m2u  : 2 sigma upper error (marginalised) 
"""
    # BUG FIX: `filename` was previously ignored and the output was
    # hard-coded to 'pars.txt'; honour the caller-supplied name.
    writetxt(filename, rec, header=hd, fmt_float='.4g', overwrite=1)
Example #2
0
File: model.py Project: nhmc/H2
def print_par(par):
    """ Tabulate the maximum likelihood parameters with their 1 and 2
    sigma joint and marginalised uncertainties, writing the result to
    'parameters.txt'.

    Fit results are taken from the module-level `P` object.
    """
    rows = []
    for i, name in enumerate(P.names):
        ml = P.ml[i]
        marg1 = P.p1sig[i]
        marg2 = P.p2sig[i]
        joint1 = P.p1sig_joint[i]
        joint2 = P.p2sig_joint[i]
        # offsets below/above the ML value: joint first, then marginalised
        rows.append((name, ml,
                     ml - joint1[0], joint1[1] - ml,
                     ml - joint2[0], joint2[1] - ml,
                     ml - marg1[0], marg1[1] - ml,
                     ml - marg2[0], marg2[1] - ml))

    names = 'name,ml,j1l,j1u,j2l,j2u,m1l,m1u,m2l,m2u'
    rec = np.rec.fromrecords(rows, names=names)

    hd = """\
# name : parameter name
# ml   : maximum likelihood value
# j1l  : 1 sigma lower error (joint with all other parameters) 
# j1u  : 1 sigma upper error (joint)
# j2l  : 2 sigma lower error (joint) 
# j2u  : 2 sigma upper error (joint) 
# m1l  : 1 sigma lower error (marginalised over all other parameters)
# m1u  : 1 sigma upper error (marginalised)
# m2l  : 2 sigma lower error (marginalised) 
# m2u  : 2 sigma upper error (marginalised) 
"""
    writetxt('parameters.txt', rec, header=hd, fmt_float='.8g', overwrite=1)
Example #3
0
def writeLSF_vpfit(wa, dw):
    """ Write a file giving the COS line spread function at wavelength
    wa for constant pixel width dw (both in Angstroms), suitable for
    input to VPFIT.
    """
    outname = 'LSF/LSF_%.1f.txt' % wa
    # grating choice depends on the requested wavelength
    grating = 'G130M' if wa < 1450 else 'G160M'
    lsf, _ = readLSF(grating, dw)

    # every column after the first is an LSF tabulated at one wavelength;
    # the column name encodes that wavelength after a one-char prefix
    cols = lsf.dtype.names[1:]
    wavs = [float(c[1:]) for c in cols]
    profiles = np.array([lsf[c] for c in cols])

    # interpolate each relative-pixel row of the LSF to wavelength wa
    newLSF = [np.interp(wa, wavs, profiles[:, j])
              for j in range(profiles.shape[1])]

    writetxt(outname, [lsf.relpix, newLSF], overwrite=1)
Example #4
0
File: utils.py Project: nhmc/COS
def writeLSF_vpfit(wa, dw):
    """ Write a file giving the COS line spread function at wavelength
    wa for constant pixel width dw (both in Angstroms), suitable for
    input to VPFIT.
    """
    outname = 'LSF/LSF_%.1f.txt' % wa

    # pick the grating appropriate for this wavelength
    grating = 'G130M'
    if wa >= 1450:
        grating = 'G160M'
    lsf, _ = readLSF(grating, dw)

    # columns after the first hold LSFs tabulated at fixed wavelengths,
    # encoded in the column names after a single-character prefix
    wav_cols = lsf.dtype.names[1:]
    wavs = []
    for col in wav_cols:
        wavs.append(float(col[1:]))
    profile = np.array([lsf[col] for col in wav_cols])

    # linearly interpolate each relative-pixel value to wavelength wa
    npix = profile.shape[1]
    newLSF = [np.interp(wa, wavs, profile[:, k]) for k in range(npix)]

    writetxt(outname, [lsf.relpix, newLSF], overwrite=1)
Example #5
0
File: model.py Project: nhmc/LAE
def print_par(par):
    """ Write a summary table of the fitted parameters to 'fig/pars.txt'.

    For each parameter this records the centre and half-width of the
    marginalised 1 sigma region, the marginalised and joint 1 and 2
    sigma levels, the median and the maximum likelihood value, all read
    from the module-level results dictionary `P`.

    `par` is unused; it is kept for backward compatibility with callers.
    """
    rec = []
    for i in range(len(P['names'])):
        p = P['ml'][i]
        pmed = P['median'][i]
        m1 = P['p1sig'][i]
        # centre and half-width of the marginalised 1 sigma interval
        p0 = 0.5 * (m1[0] + m1[1])
        sig1 = 0.5 * (m1[1] - m1[0])
        m2 = P['p2sig'][i]
        j1 = P['p1sig_joint'][i]
        j2 = P['p2sig_joint'][i]
        rec.append( (P['names'][i], p0, sig1, m1[0], m1[1],
                     m2[0], m2[1], j1[0], j1[1],
                     j2[0], j2[1], pmed, p) )

    names = 'name,cen,sig,m1l,m1u,m2l,m2u,j1l,j1u,j2l,j2u,med,ml'
    rec = np.rec.fromrecords(rec, names=names)

    # BUG FIX: the last two header lines were swapped relative to the
    # actual column order (columns are ...,med,ml).
    hd = """\
# name : parameter name
# cen  : central value (half way between the marginalised 1 sigma region)
# sig  : 1 sigma error around central value
# m1l  : 1 sigma lower level (marginalised over all other parameters)
# m1u  : 1 sigma upper level (marginalised)
# m2l  : 2 sigma lower level (marginalised) 
# m2u  : 2 sigma upper level (marginalised) 
# j1l  : 1 sigma lower level (joint with all other parameters) 
# j1u  : 1 sigma upper level (joint)
# j2l  : 2 sigma lower level (joint) 
# j2u  : 2 sigma upper level (joint) 
# med  : median value
# ml   : maximum likelihood value
"""
    from barak.io import writetxt
    writetxt('fig/pars.txt', rec, header=hd, fmt_float='.4g', overwrite=1)
Example #6
0
File: model.py Project: nhmc/LAE
def print_par(par):
    """ Write a table of the fitted parameters and their 1 and 2 sigma
    marginalised and joint uncertainty levels to 'fig/pars.txt'.

    All values come from the module-level results dictionary `P`.
    """
    rows = []
    for i, name in enumerate(P['names']):
        ml = P['ml'][i]
        med = P['median'][i]
        m1 = P['p1sig'][i]
        m2 = P['p2sig'][i]
        j1 = P['p1sig_joint'][i]
        j2 = P['p2sig_joint'][i]
        # centre and half-width of the marginalised 1 sigma interval
        centre = 0.5 * (m1[0] + m1[1])
        halfwidth = 0.5 * (m1[1] - m1[0])
        rows.append((name, centre, halfwidth,
                     m1[0], m1[1], m2[0], m2[1],
                     j1[0], j1[1], j2[0], j2[1],
                     med, ml))

    names = 'name,cen,sig,m1l,m1u,m2l,m2u,j1l,j1u,j2l,j2u,med,ml'
    rec = np.rec.fromrecords(rows, names=names)

    hd = """\
# name : parameter name
# cen  : central value (half way between the marginalised 1 sigma region)
# sig  : 1 sigma error around central value
# m1l  : 1 sigma lower level (marginalised over all other parameters)
# m1u  : 1 sigma upper level (marginalised)
# m2l  : 2 sigma lower level (marginalised) 
# m2u  : 2 sigma upper level (marginalised) 
# j1l  : 1 sigma lower level (joint with all other parameters) 
# j1u  : 1 sigma upper level (joint)
# j2l  : 2 sigma lower level (joint) 
# j2u  : 2 sigma upper level (joint) 
# ml   : maximum likelihood value
# med  : median value
"""
    from barak.io import writetxt
    writetxt('fig/pars.txt', rec, header=hd, fmt_float='.4g', overwrite=1)
Example #7
0
                unused.append(n)
            else:
                unknown.append(n)

        print len(biases), 'biases'
        print len(objects), 'imaging targets found:'
        print ' ', '\n  '.join(textwrap.wrap(' '.join(objects)))
        print len(flats), 'flats found:'
        print ' ', '\n  '.join(textwrap.wrap(' '.join(flats)))
        print len(unknown), 'unidentified exposures:'
        print ' ', '\n  '.join(textwrap.wrap(' '.join(unknown)))
        saveobj('_sort_LBC.sav',
                dict(biases=biases, objects=objects, flats=flats))
        # could be a bug writing out an empty file?

        writetxt('sort_LBC_unused', [unused], overwrite=1)

if 1:
    # Make symlinks to the raw bias frames, grouped by instrument arm.
    # `biases` is built earlier in this script and maps an arm key to a
    # collection of raw filenames; `arm_str`, `makedir` and `call` are
    # defined elsewhere in the file.
    if len(biases) > 0:
        makedir('bias')
    for arm in biases:
        # one subdirectory per arm, with a freshly cleaned raw/ area
        biasdir = 'bias/' + arm_str[arm]
        makedir(biasdir)
        makedir(biasdir + '/raw', clean=True)
        names = []
        for filename in sorted(biases[arm]):
            n = filename.rsplit('/')[-1]
            # symlink each frame back to the top-level raw/ directory
            s = 'ln -s ../../../raw/%s %s/raw/%s' % (n, biasdir, n)
            call(s, shell=1)
            # record the uncompressed name; presumably used later when
            # the frames are gunzipped -- TODO confirm against the rest
            # of the script
            names.append(n.replace('.fits.gz', '.fits'))
Example #8
0
                     imtype == 'flat' and hd['NEXTEND'] == 1:
                unused.append(n)
            else:
                unknown.append(n)

        print len(biases), 'biases'
        print len(objects), 'imaging targets found:'
        print ' ', '\n  '.join(textwrap.wrap(' '.join(objects)))
        print len(flats), 'flats found:'
        print ' ', '\n  '.join(textwrap.wrap(' '.join(flats)))
        print len(unknown), 'unidentified exposures:'
        print ' ', '\n  '.join(textwrap.wrap(' '.join(unknown)))
        saveobj('_sort_LBC.sav', dict(biases=biases,objects=objects,flats=flats))
        # could be a bug writing out an empty file?
        
        writetxt('sort_LBC_unused', [unused], overwrite=1)

if 1:
    # Make symlinks to the raw bias frames, grouped by instrument arm.
    # `biases` is built earlier in this script and maps an arm key to a
    # collection of raw filenames; `arm_str`, `makedir` and `call` are
    # defined elsewhere in the file.
    if len(biases) > 0:
        makedir('bias')
    for arm in biases:
        # one subdirectory per arm, with a freshly cleaned raw/ area
        biasdir = 'bias/' + arm_str[arm]
        makedir(biasdir)
        makedir(biasdir + '/raw', clean=True)
        names = []
        for filename in sorted(biases[arm]):
            n = filename.rsplit('/')[-1]
            # symlink each frame back to the top-level raw/ directory
            s = 'ln -s ../../../raw/%s %s/raw/%s' % (n, biasdir, n)
            call(s, shell=1)
            # record the uncompressed name; presumably used later when
            # the frames are gunzipped -- TODO confirm against the rest
            # of the script
            names.append(n.replace('.fits.gz', '.fits'))