Example #1
def x_contifit(specfil, outfil=None, savfil=None, redshift=0., divmult=1, forest_divmult=1):

    import os
    import barak.fitcont as bf
    from barak.spec import read
    from barak.io import saveobj, loadobj
    import xastropy.spec.readwrite as xsr
    import matplotlib.pyplot as plt
    from astropy.io import fits  # assumed FITS writer for fits.writeto below
    reload(xsr)
    reload(bf)

    # Initialize
    if savfil is None:
        savfil = 'conti.sav'
    if outfil is None:
        outfil = 'conti.fits'
        
    # Read spectrum + convert to Barak format
    sp = xsr.readspec(specfil)
    

    # Fit spline continuum:
    if os.path.lexists(savfil): #'contfit_' + name + '.sav'):
        option = raw_input('Adjust old continuum? (y)/n: ')
        if option.lower() != 'n':
            co_old, knots_old = loadobj(savfil) #'contfit_' + name + '.sav')
            co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
                oldco=co_old, knots=knots_old,
                divmult=divmult,
                forest_divmult=forest_divmult)
        else:
            co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
                divmult=divmult,
                forest_divmult=forest_divmult)
    else:
        co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
            divmult=divmult,
            forest_divmult=forest_divmult)
    
    # fitqsocont leaves its knots in _knots.sav; remove it if present
    if os.path.lexists('_knots.sav'):
        os.remove('_knots.sav')

    # Save continuum and attach it to the spectrum so that the plot and
    # the FITS output below include the new fit:
    saveobj(savfil, (co, knots), overwrite=1)
    sp.co = co

    # Check continuum:
    print('Plotting new continuum')
    plt.clf()
    plt.plot(sp.wa, sp.fl, drawstyle='steps-mid')
    plt.plot(sp.wa, sp.co, color='r')
    plt.show()

    # Repeat?
    confirm = raw_input('Keep continuum? (y)/n: ')
    if confirm.lower() != 'n':  # default is yes, as the prompt suggests
        fits.writeto(outfil, sp, clobber=True)
    else:
        print('Writing to tmp.fits anyhow!')
        fits.writeto('tmp.fits', sp, clobber=True)
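
A hedged usage sketch for x_contifit; the file names and redshift below are
hypothetical placeholders, and only the keyword names come from the signature
above.

# Hypothetical call; 'q1009.fits' and the output names are placeholders.
x_contifit('q1009.fits', outfil='q1009_conti.fits',
           savfil='q1009_conti.sav', redshift=2.73,
           divmult=2, forest_divmult=2)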
Example #2
File: run_mcmc.py Project: nhmc/LAE
def run_mcmc(sampler, opt):

    print 'Reading initial state from samples_burn.sav.gz'
    burn_in = loadobj('samples_burn.sav.gz')
    sampler.reset()

    # Starting from the final position in the burn-in chain, sample for
    # opt.Nmcmc steps. (rstate0 is the state of the internal random number
    # generator.)

    # note the results are saved in the sampler object.
    iprint = opt.iprint
    print "Running MCMC with %i steps" % opt.Nmcmc
    for i,(pos, lnprob, state) in enumerate(sampler.sample(
        burn_in['final_pos'], iterations=opt.Nmcmc, rstate0=burn_in['state'])):
        i += 1
        if not i % iprint:
            print i

    print 'Saving results to samples_mcmc.sav.gz'
    save_samples('samples_mcmc.sav.gz', sampler, pos, state)
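
save_samples is not defined in this snippet. Below is a minimal sketch of what
it might do, assuming barak.io.saveobj and the emcee 2.x sampler attributes;
the dictionary keys mirror how the other examples read the file back
(samples['chain'], samples['lnprob'], burn_in['final_pos'], ...).

from barak.io import saveobj

def save_samples(filename, sampler, pos, state):
    # Assumed layout; the keys match the reads elsewhere in these examples.
    samples = dict(chain=sampler.chain,                  # (nwalkers, nsteps, npar)
                   lnprob=sampler.lnprobability,         # ln-probability per sample
                   accept=sampler.acceptance_fraction,   # per-walker acceptance
                   final_pos=pos,                        # restart point
                   state=state)                          # RNG state for rstate0
    saveobj(filename, samples, overwrite=1)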
Example #3
def read_redmapper():
    d = fits.getdata(prefix + 'clusters/redmapper/'
                     'dr8_run_redmapper_v5.10_lgt5_catalog.fits')
    #d = fits.getdata(prefix + 'clusters/redmapper/DR8/'
    #                 'dr8_run_redmapper_v5.10_lgt5_catalog.fit')

    z = d['Z_LAMBDA']
    c0 = d['BCG_SPEC_Z'] != -1
    z[c0] = d['BCG_SPEC_Z'][c0]
    zer = d['Z_LAMBDA_E']
    if CLUS_ZERR == 'erphotz':
        zer[c0] = 0.001
    elif isinstance(CLUS_ZERR, float):
        zer[:] = CLUS_ZERR
    else:
        raise ValueError('unrecognised CLUS_ZERR: %r' % (CLUS_ZERR,))

    # 0.005 corresponds to a velocity dispersion of 937 km/s at z=0.6
    zer = np.where(zer < 0.005, 0.005, zer)

    if os.path.exists('dc_redmapper.sav'):
        rlos = loadobj('dc_redmapper.sav')
        assert len(rlos) == len(d)
    else:
        # this takes about 5 min to run
        print 'calculating comoving distances'
        rlos = cosmo.comoving_distance(z)
        saveobj('dc_redmapper.sav', rlos)

    # in solar masses, conversion from Rykoff 2013 appendix B.
    m200 = m200_from_richness(d['LAMBDA_CHISQ'])

    d1 = np.rec.fromarrays([
        d.RA, d.DEC, z, zer, d.LAMBDA_CHISQ, d.MEM_MATCH_ID, rlos.value, m200
    ],
                           names='ra,dec,z,zer,richness,id,rlos,m200')
    d2 = d1[d1.z > ZMIN_CLUS]
    d3 = d2[between(np.log10(d2['m200']), log10MINMASS, log10MAXMASS)]

    iclus_from_id = {idval: i for i, idval in enumerate(d3.id)}
    return d3, iclus_from_id
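
m200_from_richness is assumed from context. One plausible sketch, using a
power-law mass-richness relation of the form given by Rykoff et al.; the
coefficients below are quoted from memory and should be checked against the
paper's appendix B before use.

import numpy as np

def m200_from_richness(richness):
    # Assumed relation: ln(M200 / 1e14 Msun) = 1.72 + 1.08 ln(richness / 60)
    return 1e14 * np.exp(1.72 + 1.08 * np.log(richness / 60.))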
Example #4
def read_redmapper():
    #d = fits.getdata(prefix + 'clusters/redmapper/'
    #                 'dr8_run_redmapper_v5.10_lgt5_catalog.fits')
    d = fits.getdata(prefix + 'clusters/redmapper/DR8/'
                     'dr8_run_redmapper_v5.10_lgt5_catalog.fit')

    z = d['Z_LAMBDA']
    c0 = d['BCG_SPEC_Z'] != -1 
    z[c0] = d['BCG_SPEC_Z'][c0]
    zer = d['Z_LAMBDA_E']
    if CLUS_ZERR == 'erphotz':
        zer[c0] = 0.001
    elif isinstance(CLUS_ZERR, float):
        zer[:] = CLUS_ZERR
    else:
        raise ValueError('unrecognised CLUS_ZERR: %r' % (CLUS_ZERR,))

    # 0.005 corresponds to a velocity dispersion of 937 km/s at z=0.6 
    zer = np.where(zer < 0.005, 0.005, zer)

    if os.path.exists('dc_redmapper.sav'):
        rlos = loadobj('dc_redmapper.sav')
        assert len(rlos) == len(d)
    else:
        # this takes about 5 min to run
        print 'calculating comoving distances'
        rlos = cosmo.comoving_distance(z)
        saveobj('dc_redmapper.sav', rlos)

    # in solar masses, conversion from Rykoff 2013 appendix B.
    m200 = m200_from_richness(d['LAMBDA_CHISQ'])


    d1 = np.rec.fromarrays([d.RA, d.DEC, z, zer,
                            d.LAMBDA_CHISQ, d.MEM_MATCH_ID, rlos.value, m200],
                           names='ra,dec,z,zer,richness,id,rlos,m200')
    d2 = d1[between(d1.z, ZMIN_CLUS, ZMAX_CLUS)]
    d3 = d2[between(np.log10(d2['m200']), log10MINMASS, log10MAXMASS)]

    iclus_from_id = {idval:i for i,idval in enumerate(d3.id)}
    return d3, iclus_from_id
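
between is used here without a definition; barak.utilities provides one, and a
minimal equivalent sketch (the half-open edge convention is assumed) is:

import numpy as np

def between(a, vmin, vmax):
    # Boolean mask, True where vmin <= a < vmax.
    a = np.asarray(a)
    return (vmin <= a) & (a < vmax)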
Example #5
def run_mcmc(sampler, opt):

    print 'Reading initial state from samples_burn.sav.gz'
    burn_in = loadobj('samples_burn.sav.gz')
    sampler.reset()

    # Starting from the final position in the burn-in chain, sample for
    # opt.Nmcmc steps. (rstate0 is the state of the internal random number
    # generator.)

    # note the results are saved in the sampler object.
    iprint = opt.iprint
    print "Running MCMC with %i steps" % opt.Nmcmc
    for i, (pos, lnprob, state) in enumerate(
            sampler.sample(burn_in['final_pos'],
                           iterations=opt.Nmcmc,
                           rstate0=burn_in['state'])):
        i += 1
        if not i % iprint:
            print i

    print 'Saving results to samples_mcmc.sav.gz'
    save_samples('samples_mcmc.sav.gz', sampler, pos, state)
Example #6
    # Fields stored per radial bin in the rho dicts below:
    #   abid     - absorber id for a cluster-absorber pair
    #   pid      - pair ids where the cluster in the pair is near an absorber
    #   cid      - cluster id
    #   qid      - qso id
    #   zpathtot - total zpath over all pairs

    #LOGBINS = False
    #rbin = Bins(np.arange(0, 11, 1))

    LOGBINS = True
    rbin = Bins(np.arange(-1.4, 1.61, 0.2))

    outname = run_id + '/rho_dNdz_clus.sav'
    if os.path.exists(outname):
        print 'Reading', outname
        rho = loadobj(outname)
    else:
        rho = [dict(zpathlim=[], abid=[], pid=[], cid=[], qid=[],
                    Wr=[], Wre=[], zpathtot=0) for i in xrange(len(rbin.cen))]
    
        # find tot zpath (including both field and cluster paths up to
        # z=1, only towards sightlines with a nearby cluster though) also?
    
        print 'Calculating MgII hits close to clusters, and the total z path length'
    
        if DEBUG:
            fig4 = plt.figure(4, figsize=(6,6))
            ax = fig4.add_subplot(111)

        print 'Looping over QSOs'
        for i,(qid,ind) in enumerate(indgroupby(pairs, 'qid')):
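
indgroupby is consumed by the loop above but not shown. A minimal sketch
matching that usage, yielding each unique field value together with the row
indices that carry it:

import numpy as np

def indgroupby(arr, field):
    # For each unique value in arr[field], yield (value, row indices).
    vals = arr[field]
    for val in np.unique(vals):
        yield val, np.flatnonzero(vals == val)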
Example #7
def make_interpolators_uvbtilt(trans, simnames):
    """ Make interpolators including different UV slopes, given by the
    simulation names.

    The simname naming scheme should be uvb_k00, uvb_k01, uvb_k02, ...;
    the uvb k values must be sorted in ascending order.
    """

    Models = []
    aUV = []
    for simname in simnames:
        # need to define prefix, SIMNAME
        gridname = os.path.join(simname, 'grid.cfg')

        print 'Reading', gridname
        cfg = parse_config(gridname)
        aUV.append(cfg.uvb_tilt)

        name = os.path.join(simname, cfg.prefix + '_grid.sav.gz')
        print 'Reading', name
        M = loadobj(name)
        M = adict(M)

        Uconst = (M.U + M.nH)[0]
        print 'Uconst', Uconst, cfg.uvb_tilt
        assert np.allclose(Uconst, M.U + M.nH)
        Models.append(M)

    ##########################################################################
    # Interpolate cloudy grids onto a finer scale for plotting and
    # likelihood calculation
    ##########################################################################

    roman_map = {
        'I': 0,
        'II': 1,
        'III': 2,
        'IV': 3,
        'V': 4,
        'VI': 5,
        'VII': 6,
        'VIII': 7,
        'IX': 8,
        'X': 9,
        '2': 2
    }
    Ncloudy = {}
    Ncloudy_raw = {}
    print 'Interpolating...'
    for tr in trans + ['NH']:
        shape = len(M.NHI), len(M.nH), len(M.Z), len(aUV)
        Nvals = np.zeros(shape)
        if tr in ['CII*']:
            for i, M in enumerate(Models):
                Nvals[:, :, :, i] = M.Nex[tr][:, :, :]
        elif tr == 'NH':
            for i, M in enumerate(Models):
                logNHI = M.N['H'][:, :, :, 0]
                logNHII = M.N['H'][:, :, :, 1]
                logNHtot = np.log10(10**logNHI + 10**logNHII)
                Nvals[:, :, :, i] = logNHtot
        else:
            atom, stage = split_trans_name(tr)
            ind = roman_map[stage]
            for i, M in enumerate(Models):
                Nvals[:, :, :, i] = M.N[atom][:, :, :, ind]

        # use ndimage.map_coordinates (which is spline interpolation)
        coord = M.NHI, M.nH, M.Z, aUV
        try:
            Ncloudy[tr] = MapCoord_Interpolator(Nvals, coord)
        except Exception:
            # drop into the debugger to inspect a failed interpolation
            import pdb
            pdb.set_trace()

        Ncloudy_raw[tr] = Nvals

    print 'done'
    return Ncloudy, Ncloudy_raw, Models, np.array(aUV, np.float)
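
MapCoord_Interpolator is not defined in this snippet. Below is a sketch of one
way to build it on scipy.ndimage.map_coordinates, as the comment above
suggests; the interface (a grid of values plus one coordinate array per axis,
called with physical coordinates) is assumed from the usage here.

import numpy as np
from scipy import ndimage

class MapCoord_Interpolator(object):
    def __init__(self, values, coord):
        # values: n-d grid; coord: one 1-d ascending coordinate array per axis.
        self.values = values
        self.coord = [np.asarray(c, dtype=float) for c in coord]

    def __call__(self, *points):
        # Convert physical coordinates to fractional array indices, then
        # spline-interpolate with map_coordinates.
        pix = [np.interp(np.atleast_1d(p), c, np.arange(len(c)))
               for p, c in zip(points, self.coord)]
        return ndimage.map_coordinates(self.values, np.array(pix), order=3)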
Example #8
    # Fields stored per radial bin in the rho dicts below:
    #   abid     - absorber id for a cluster-absorber pair
    #   pid      - pair ids where the cluster in the pair is near an absorber
    #   cid      - cluster id
    #   qid      - qso id
    #   zpathtot - total zpath over all pairs

    #LOGBINS = False
    #rbin = Bins(np.arange(0, 11, 1))

    LOGBINS = True
    rbin = Bins(np.arange(-1.4, 1.61, 0.2))

    outname = run_id + '/rho_dNdz_clus.sav'
    if os.path.exists(outname):
        print 'Reading', outname
        rho = loadobj(outname)
    else:
        rho = [
            dict(zpathlim=[],
                 abid=[],
                 pid=[],
                 cid=[],
                 qid=[],
                 Wr=[],
                 Wre=[],
                 zpathtot=0) for i in xrange(len(rbin.cen))
        ]

        # find tot zpath (including both field and cluster paths up to
        # z=1, only towards sightlines with a nearby cluster though) also?
Example #9
def main(args):
    path = os.path.abspath(__file__).rsplit("/", 1)[0]
    defaults = parse_config(path + "/default.cfg")
    opt = parse_config("model.cfg", defaults)
    print pprint.pformat(opt)
    print "### Read parameters from model.cfg ###"

    filename, = args
    samples = loadobj(filename)

    mean_accept = samples["accept"].mean()
    print "Mean acceptance fraction", mean_accept
    nwalkers, nsamples, npar = samples["chain"].shape

    if not os.path.lexists("fig/"):
        os.mkdir("fig")

    if filename.startswith("samples_burn"):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples["lnprob"].ravel().argmax()
        P["ml"] = samples["chain"].reshape(-1, npar)[i]

        print "Plotting burn-in sample posteriors"
        # bins for plotting posterior histograms
        P["bins"] = [np.linspace(lo, hi, opt.Nhistbins) for lo, hi in zip(P["min"], P["max"])]

        fig, axes = plot_posteriors_burn(samples["chain"], P, npar=opt.npar)
        fig.suptitle("%i samples of %i walkers" % (nsamples, nwalkers), fontsize=14)
        fig.savefig("fig/posterior_burnin." + opt.plotformat)

        print "Plotting traces"
        fig, nwplot = plot_trace(samples["chain"])
        fig.suptitle("Chain traces for %i of %i walkers" % (nwplot, nwalkers))
        fig.savefig("fig/traces." + opt.plotformat)

        if opt.autocorr:
            print "Plotting autocorrelation"
            fig, axes = plot_autocorr(samples["chain"])
            fig.suptitle(
                "Autocorrelation for %i walkers with %i samples. "
                "(Mean acceptance fraction %.2f)" % (nwalkers, nsamples, mean_accept),
                fontsize=14,
            )
            fig.savefig("fig/autocorr." + opt.plotformat)

    else:
        # make a chain of independent samples
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples
        chain = samples["chain"][:, 0 : Ns * Nt : Nt, :].reshape(-1, npar)

        # bins for plotting posterior histograms
        P["bins"] = []
        for i in xrange(len(P["names"])):
            x0, x1 = chain[:, i].min(), chain[:, i].max()
            dx = x1 - x0
            lo = x0 - 0.1 * dx
            hi = x1 + 0.1 * dx
            P["bins"].append(np.linspace(lo, hi, opt.Nhistbins))

        levels = 0.6827, 0.9545
        P["p1sig"] = [find_min_interval(chain[:, i], levels[0]) for i in range(npar)]
        P["p2sig"] = [find_min_interval(chain[:, i], levels[1]) for i in range(npar)]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        lnprob = samples["lnprob"][:, 0 : Ns * Nt : Nt].ravel()
        isort = lnprob.argsort()
        P["ijoint_sig"] = [isort[int((1 - l) * len(lnprob)) :] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters.
        P["p1sig_joint"] = []
        P["p2sig_joint"] = []
        for i in range(npar):
            lo = chain[P["ijoint_sig"][0], i].min()
            hi = chain[P["ijoint_sig"][0], i].max()
            P["p1sig_joint"].append((lo, hi))
            lo = chain[P["ijoint_sig"][1], i].min()
            hi = chain[P["ijoint_sig"][1], i].max()
            P["p2sig_joint"].append((lo, hi))

        P["median"] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples["lnprob"].ravel().argmax()
        P["ml"] = samples["chain"].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            if not scipy:
                raise ImportError("Scipy minimize not available")
            print "Finding maximum likelihood parameter values"
            P["ml"] = minimize(lambda *x: -ln_likelihood(*x), P["ml"])
            print "done"

        if opt.plotposteriors:
            print "Plotting sample posteriors"
            fig, axes = plot_posteriors(chain, P, nplot=opt.npar)
            fig.suptitle("%i of %i samples, %i walkers, thinning %i" % (Ns, nsamples, nwalkers, Nt), fontsize=14)
            fig.savefig("fig/posterior_mcmc." + opt.plotformat, dpi=200)

    if opt.plotdata:
        print "Plotting the maximum likelihood model and data"
        from model import plot_model

        if opt.nsamp_plot > 1:
            chain = samples["chain"].reshape(-1, npar)
            step = int(len(chain) / opt.nsamp_plot)
            samp = chain[::step]
            fig = plot_model(samp)
        else:
            fig = plot_model([P["median"]])

    if opt.printpar and not filename.startswith("samples_burn"):
        from model import print_par

        print_par(P)

    if opt.display:
        print "Displaying..."
        pl.show()

    print "Done!"
Example #10
def x_contifit(specfil,
               outfil=None,
               savfil=None,
               redshift=0.,
               divmult=1,
               forest_divmult=1):

    import os
    import barak.fitcont as bf
    from barak.spec import read
    from barak.io import saveobj, loadobj
    import xastropy.spec.readwrite as xsr
    import matplotlib.pyplot as plt
    from astropy.io import fits  # assumed FITS writer for fits.writeto below
    reload(xsr)
    reload(bf)

    # Initialize
    if savfil is None:
        savfil = 'conti.sav'
    if outfil is None:
        outfil = 'conti.fits'

    # Read spectrum + convert to Barak format
    sp = xsr.readspec(specfil)

    # Fit spline continuum:
    if os.path.lexists(savfil):  #'contfit_' + name + '.sav'):
        option = raw_input('Adjust old continuum? (y)/n: ')
        if option.lower() != 'n':
            co_old, knots_old = loadobj(savfil)  #'contfit_' + name + '.sav')
            co, knots = bf.fitqsocont(sp.wa,
                                      sp.fl,
                                      sp.er,
                                      redshift,
                                      oldco=co_old,
                                      knots=knots_old,
                                      divmult=divmult,
                                      forest_divmult=forest_divmult)
        else:
            co, knots = bf.fitqsocont(sp.wa,
                                      sp.fl,
                                      sp.er,
                                      redshift,
                                      divmult=divmult,
                                      forest_divmult=forest_divmult)
    else:
        co, knots = bf.fitqsocont(sp.wa,
                                  sp.fl,
                                  sp.er,
                                  redshift,
                                  divmult=divmult,
                                  forest_divmult=forest_divmult)

    # fitqsocont leaves its knots in _knots.sav; remove it if present
    if os.path.lexists('_knots.sav'):
        os.remove('_knots.sav')

    # Save continuum and attach it to the spectrum so that the plot and
    # the FITS output below include the new fit:
    saveobj(savfil, (co, knots), overwrite=1)
    sp.co = co

    # Check continuum:
    print('Plotting new continuum')
    plt.clf()
    plt.plot(sp.wa, sp.fl, drawstyle='steps-mid')
    plt.plot(sp.wa, sp.co, color='r')
    plt.show()

    # Repeat?
    confirm = raw_input('Keep continuum? (y)/n: ')
    if confirm.lower() != 'n':  # default is yes, as the prompt suggests
        fits.writeto(outfil, sp, clobber=True)
    else:
        print('Writing to tmp.fits anyhow!')
        fits.writeto('tmp.fits', sp, clobber=True)
Example #11
current directory!"""

if 1:
    filenames = glob('raw/*.fits*')

    if len(filenames) == 0:
        print usage
        sys.exit()
    print 'Reading', len(filenames), 'raw input files...'

    # types

    #use keywords HIERARCH ESO INS TPL ID, OBJECT and OBS NAME to identify them

    if os.path.lexists('_sort_LBC.sav'):
        d = loadobj('_sort_LBC.sav')
        biases = d['biases']
        objects = d['objects']
        flats = d['flats']

    else:
        # IMAGETYP

        # object
        # zero
        # flat
        # FOCUS

        # OBJECT

        # BinoBias
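
The else branch above is truncated. A hedged sketch of the classification it
implies, grouping frames by the IMAGETYP header values listed in the comments
(object / zero / flat) and caching the result for the loadobj branch; the use
of astropy.io.fits is an assumption.

from astropy.io import fits  # assumption: any FITS header reader would do

biases, objects, flats = [], [], []
for fn in filenames:
    imagetyp = fits.getheader(fn).get('IMAGETYP', '').strip().lower()
    if imagetyp == 'zero':
        biases.append(fn)
    elif imagetyp == 'flat':
        flats.append(fn)
    elif imagetyp == 'object':
        objects.append(fn)

saveobj('_sort_LBC.sav', dict(biases=biases, objects=objects, flats=flats))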
Example #12
def main(args):
    path = os.path.abspath(__file__).rsplit('/', 1)[0]
    defaults = parse_config(path + '/default.cfg')
    opt = parse_config('model.cfg', defaults)
    print pprint.pformat(opt)
    print '### Read parameters from model.cfg ###'

    filename, = args
    samples = loadobj(filename)

    mean_accept = samples['accept'].mean()
    print 'Mean acceptance fraction', mean_accept
    nwalkers, nsamples, npar = samples['chain'].shape

    if not os.path.lexists('fig/'):
        os.mkdir('fig')

    if filename.startswith('samples_burn'):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        print 'Plotting burn-in sample posteriors'
        # bins for plotting posterior histograms
        P['bins'] = [
            np.linspace(lo, hi, opt.Nhistbins)
            for lo, hi in zip(P['min'], P['max'])
        ]

        fig, axes = plot_posteriors_burn(samples['chain'], P, npar=opt.npar)
        fig.suptitle('%i samples of %i walkers' % (nsamples, nwalkers),
                     fontsize=14)
        fig.savefig('fig/posterior_burnin.' + opt.plotformat)

        print 'Plotting traces'
        fig, nwplot = plot_trace(samples['chain'])
        fig.suptitle('Chain traces for %i of %i walkers' % (nwplot, nwalkers))
        fig.savefig('fig/traces.' + opt.plotformat)

        if opt.autocorr:
            print 'Plotting autocorrelation'
            fig, axes = plot_autocorr(samples['chain'])
            fig.suptitle('Autocorrelation for %i walkers with %i samples. '
                         '(Mean acceptance fraction %.2f)' %
                         (nwalkers, nsamples, mean_accept),
                         fontsize=14)
            fig.savefig('fig/autocorr.' + opt.plotformat)

    else:
        # make a chain of independent samples
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples
        chain = samples['chain'][:, 0:Ns * Nt:Nt, :].reshape(-1, npar)

        # bins for plotting posterior histograms
        P['bins'] = []
        for i in xrange(len(P['names'])):
            x0, x1 = chain[:, i].min(), chain[:, i].max()
            dx = x1 - x0
            lo = x0 - 0.1 * dx
            hi = x1 + 0.1 * dx
            P['bins'].append(np.linspace(lo, hi, opt.Nhistbins))

        levels = 0.6827, 0.9545
        P['p1sig'] = [
            find_min_interval(chain[:, i], levels[0]) for i in range(npar)
        ]
        P['p2sig'] = [
            find_min_interval(chain[:, i], levels[1]) for i in range(npar)
        ]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        lnprob = samples['lnprob'][:, 0:Ns * Nt:Nt].ravel()
        isort = lnprob.argsort()
        P['ijoint_sig'] = [isort[int((1 - l) * len(lnprob)):] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters.
        P['p1sig_joint'] = []
        P['p2sig_joint'] = []
        for i in range(npar):
            lo = chain[P['ijoint_sig'][0], i].min()
            hi = chain[P['ijoint_sig'][0], i].max()
            P['p1sig_joint'].append((lo, hi))
            lo = chain[P['ijoint_sig'][1], i].min()
            hi = chain[P['ijoint_sig'][1], i].max()
            P['p2sig_joint'].append((lo, hi))

        P['median'] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            if not scipy:
                raise ImportError('Scipy minimize not available')
            print 'Finding maximum likelihood parameter values'
            P['ml'] = minimize(lambda *x: -ln_likelihood(*x), P['ml'])
            print 'done'

        if opt.plotposteriors:
            print 'Plotting sample posteriors'
            fig, axes = plot_posteriors(chain, P, nplot=opt.npar)
            fig.suptitle('%i of %i samples, %i walkers, thinning %i' %
                         (Ns, nsamples, nwalkers, Nt),
                         fontsize=14)
            fig.savefig('fig/posterior_mcmc.' + opt.plotformat, dpi=200)

    if opt.plotdata:
        print 'Plotting the maximum likelihood model and data'
        from model import plot_model
        if opt.nsamp_plot > 1:
            chain = samples['chain'].reshape(-1, npar)
            step = int(len(chain) / opt.nsamp_plot)
            samp = chain[::step]
            fig = plot_model(samp)
        else:
            fig = plot_model([P['median']])

    if opt.printpar and not filename.startswith('samples_burn'):
        from model import print_par
        print_par(P)

    if opt.display:
        print 'Displaying...'
        pl.show()

    print 'Done!'
Example #13
import time, os

import numpy as np
from barak.plot import make_log_xlabels, make_log_ylabels
from barak.virial import deltavir

from scipy.integrate import quad

from cosmolopy.perturbation import fgrowth, w_tophat, power_spectrum
from cosmolopy.parameters import WMAP7_BAO_H0_mean

from barak.io import loadobj, saveobj


SIGMA_CACHE = {}
if os.path.exists('SIGMA_CACHE.sav'):
    SIGMA_CACHE = loadobj('SIGMA_CACHE.sav')
    print 'Found', len(SIGMA_CACHE), 'cached sigma values in save file'

FIGSIZE = 5,5

PLOT = 1

def find_roots(darr):
    """ Approximate root-finding. Cannot find multiple roots that are
    closer together than the array spacing.
    """
    sign = np.sign(darr)
    c0 = np.zeros(len(darr), dtype=bool)
    # The offset isn't quite right here, but it doesn't change much
    c0[1:] = np.abs(sign[1:] - sign[:-1]) == 2
    return c0
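
A quick usage sketch for find_roots, marking the sign changes of sin(x):

x = np.linspace(0, 10, 1000)
mask = find_roots(np.sin(x))
print x[mask]   # values just after each zero crossing (near pi, 2*pi, 3*pi)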
Example #14
    ################################################################
    # Read the cloudy grids and make the interpolators
    ################################################################ 
    Ncloudy, Ncloudy_raw, Models, aUV = make_interpolators_uvbtilt(
        trans, simnames)
    M = Models[0]
    #import pdb; pdb.set_trace()
    Uconst_vals = []
    for model in Models:
        Uconst_vals.append((model['U'] + model['nH'])[0])

    # note it's a function of aUV!
    Uconst = AkimaSpline(aUV, Uconst_vals)

    # Now find the parameter chains
    samples = loadobj('samples_mcmc.sav.gz')
    nwalkers, nsamples, npar = samples['chain'].shape
    parvals = samples['chain'].reshape(-1, npar)

    PAR = samples['par']
    assert PAR['names'][-1] == 'aUV'
    assert PAR['names'][-2] == 'Z'
    assert PAR['names'][-3] == 'nH'
    assert PAR['names'][-4] == 'NHI'

    aUV = parvals[:,-1]
    logZ = parvals[:,-2]
    lognH = parvals[:,-3]
    logNHI = parvals[:,-4]

    logU = Uconst(aUV) - lognH
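
A follow-on sketch: with the chains unpacked as above, the derived logU
distribution can be summarised by its median and a 68 per cent interval.

import numpy as np

lo, med, hi = np.percentile(logU, [15.87, 50., 84.13])
print 'logU = %.2f +%.2f/-%.2f' % (med, hi - med, med - lo)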
Example #15
File: model.py Project: nhmc/LAE
def make_interpolators_uvbtilt(trans, simnames):
    """ Make interpolators including different UV slopes, given by the
    simulation names.

    The simname naming scheme should be uvb_k00, uvb_k01, uvb_k02, ...;
    the uvb k values must be sorted in ascending order.
    """

    Models = []
    aUV = []
    for simname in simnames:
        # need to define prefix, SIMNAME
        gridname = os.path.join(simname, 'grid.cfg')
    
        print 'Reading', gridname
        cfg = parse_config(gridname)
        aUV.append(cfg.uvb_tilt)
    
        name = os.path.join(simname, cfg.prefix + '_grid.sav.gz')
        print 'Reading', name
        M = loadobj(name)
        M = adict(M)

        Uconst = (M.U + M.nH)[0]
        print 'Uconst', Uconst, cfg.uvb_tilt
        assert np.allclose(Uconst, M.U + M.nH)
        Models.append(M)

    ##########################################################################
    # Interpolate cloudy grids onto a finer scale for plotting and
    # likelihood calculation
    ##########################################################################

    roman_map = {'I':0, 'II':1, 'III':2, 'IV':3, 'V':4, 'VI':5,
                 'VII':6, 'VIII':7, 'IX':8, 'X':9, '2':2}
    Ncloudy = {}
    Ncloudy_raw = {}
    print 'Interpolating...'
    for tr in trans + ['NH']:
        shape = len(M.NHI), len(M.nH), len(M.Z), len(aUV)
        Nvals = np.zeros(shape)
        if tr in ['CII*']:
            for i,M in enumerate(Models):
                Nvals[:,:,:,i] = M.Nex[tr][:,:,:]
        elif tr == 'NH':
            for i,M in enumerate(Models):
                logNHI = M.N['H'][:,:,:,0]
                logNHII = M.N['H'][:,:,:,1]
                logNHtot = np.log10(10**logNHI + 10**logNHII)
                Nvals[:,:,:,i] = logNHtot            
        else:
            atom, stage = split_trans_name(tr)
            ind = roman_map[stage]
            for i,M in enumerate(Models):
                Nvals[:,:,:,i] = M.N[atom][:,:,:,ind]

        # use ndimage.map_coordinates (which is spline interpolation)
        coord = M.NHI, M.nH, M.Z, aUV
        try:
            Ncloudy[tr] = MapCoord_Interpolator(Nvals, coord)
        except Exception:
            # drop into the debugger to inspect a failed interpolation
            import pdb
            pdb.set_trace()

        Ncloudy_raw[tr] = Nvals

    print 'done'
    return Ncloudy, Ncloudy_raw, Models, np.array(aUV, np.float)
Example #16
    ax1 = ax.twiny()
    x0, x1 = ax.get_xlim()
    const = (grid.U + grid.nH)[0]
    assert np.allclose(const, grid.U + grid.nH)
    ax1.set_xlim(const - x0, const - x1)
    ax1.set_xlabel(r'$\log_{10}\ U$')


if 1:
    ##############################################
    # Read the model
    ##############################################
    
    cfg = parse_config(gridname)

    M = loadobj(os.path.join(prefix, simname, cfg.prefix + '_grid.sav.gz'))
    M = adict(M)

    # A finer grid of parameter values for interpolation below
    NHI = np.linspace(M.NHI[0], M.NHI[-1], 100)
    nH = np.linspace(M.nH[0], M.nH[-1], 101)
    Z = np.linspace(M.Z[0], M.Z[-1], 102)

    dNHI = NHI[1] - NHI[0]
    dnH = nH[1] - nH[0]
    dZ = Z[1] - Z[0]


if 1:
    ##############################################
    # Read the observed column densities
Example #17
def main(args):
    path = os.path.abspath(__file__).rsplit('/', 1)[0]
    defaults = parse_config(path + '/default.cfg')
    opt = parse_config('model.cfg', defaults)
    print pprint.pformat(opt)
    print '### Read parameters from model.cfg ###'

    filename, = args
    samples = loadobj(filename)

    mean_accept = samples['accept'].mean()
    print 'Mean acceptance fraction', mean_accept
    nwalkers, nsamples, npar = samples['chain'].shape

    if not os.path.lexists('fig/'):
        os.mkdir('fig')

    if filename.startswith('samples_burn'):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        print 'Plotting burn-in sample posteriors'
        # bins for plotting posterior histograms
        P['bins'] = [np.linspace(lo, hi, opt.Nhistbins) for
                     lo,hi in zip(P['min'], P['max'])]

        fig,axes = plot_posteriors_burn(samples['chain'], P, npar=opt.npar)
        fig.suptitle('%i samples of %i walkers' % (
            nsamples, nwalkers), fontsize=14)
        fig.savefig('fig/posterior_burnin.' + opt.plotformat)
        
        print 'Plotting traces'
        fig, nwplot = plot_trace(samples['chain'])
        fig.suptitle('Chain traces for %i of %i walkers' % (nwplot,nwalkers))
        fig.savefig('fig/traces.' + opt.plotformat)

        if opt.autocorr:
            print 'Plotting autocorrelation'
            fig, axes = plot_autocorr(samples['chain'])
            fig.suptitle('Autocorrelation for %i walkers with %i samples. '
                         '(Mean acceptance fraction %.2f)' %
                         (nwalkers, nsamples, mean_accept), fontsize=14)
            fig.savefig('fig/autocorr.' + opt.plotformat)

    else:
        # make a chain of independent samples
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples 
        chain = samples['chain'][:,0:Ns*Nt:Nt,:].reshape(-1, npar)


        # bins for plotting posterior histograms
        P['bins'] = []
        for i in xrange(len(P['names'])):
            x0, x1 = chain[:,i].min(), chain[:,i].max()
            dx = x1 - x0
            lo = x0 - 0.1*dx
            hi = x1 + 0.1*dx
            P['bins'].append( np.linspace(lo, hi, opt.Nhistbins) )


        levels = 0.6827, 0.9545
        P['p1sig'] = [find_min_interval(chain[:, i], levels[0]) for i
                      in range(npar)]
        P['p2sig'] = [find_min_interval(chain[:, i], levels[1]) for i
                      in range(npar)]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        lnprob = samples['lnprob'][:,0:Ns*Nt:Nt].ravel()
        isort = lnprob.argsort()
        P['ijoint_sig'] = [isort[int((1-l)*len(lnprob)):] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters.
        P['p1sig_joint'] = []
        P['p2sig_joint'] = []
        for i in range(npar):
            lo = chain[P['ijoint_sig'][0], i].min()
            hi = chain[P['ijoint_sig'][0], i].max() 
            P['p1sig_joint'].append((lo, hi))
            lo = chain[P['ijoint_sig'][1], i].min()
            hi = chain[P['ijoint_sig'][1], i].max()
            P['p2sig_joint'].append((lo, hi))

        P['median'] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            if not scipy:
                raise ImportError('Scipy minimize not available')
            print 'Finding maximum likelihood parameter values'
            P['ml'] = minimize(lambda *x: -ln_likelihood(*x), P['ml'])
            print 'done'

        if opt.plotposteriors:
            print 'Plotting sample posteriors'
            fig, axes = plot_posteriors(chain, P, npar=opt.npar)
            fig.suptitle('%i of %i samples, %i walkers, thinning %i' % (
                Ns, nsamples, nwalkers, Nt), fontsize=14)
            fig.savefig('fig/posterior_mcmc.' + opt.plotformat)

    if opt.plotdata:
        print 'Plotting the maximum likelihood model and data'
        from model import plot_model
        fig = plot_model(P['ml'])
        fig.savefig('fig/model.' + opt.plotformat)

    if opt.printpar and not filename.startswith('samples_burn'):
        from model import print_par
        print_par(P)

    if opt.display:
        print 'Displaying...'
        pl.show()

    print 'Done!'
Example #18

if 1:
    filenames = glob('raw/*.fits*')

    if len(filenames) == 0:
        print usage
        sys.exit()
    print 'Reading', len(filenames), 'raw input files...'

    # types

    #use keywords HIERARCH ESO INS TPL ID, OBJECT and OBS NAME to identify them

    if os.path.lexists('_sort_LBC.sav'):
        d = loadobj('_sort_LBC.sav')
        biases = d['biases']
        objects = d['objects']
        flats = d['flats']

    else:
        # IMAGETYP

        # object
        # zero
        # flat
        # FOCUS

        # OBJECT

        # BinoBias