Example #1
def main(args=None):

    path = os.path.abspath(__file__).rsplit('/', 1)[0]
    defaults = parse_config(path + '/default.cfg')
    opt = parse_config('model.cfg', defaults)
    print '### Read parameters from model.cfg ###'

    print 'model parameters', P['names']
    print 'minimum allowed values', P['min']
    print 'maximum allowed values', P['max']

    Npar = len(P['names'])

    print opt.Nthreads, 'threads'
    print opt.Nwalkers, 'walkers'

    sampler = emcee.EnsembleSampler(opt.Nwalkers,
                                    Npar,
                                    ln_likelihood,
                                    threads=opt.Nthreads)

    if opt.Nburn > 0:
        t1 = time.time()
        p0 = get_initial_positions(opt.Nwalkers)
        run_burn_in(sampler, opt, p0)
        print '%.2g min elapsed' % ((time.time() - t1) / 60.)

    if opt.Nmcmc > 0:
        t1 = time.time()
        run_mcmc(sampler, opt)
        print '%.2g min elapsed' % ((time.time() - t1) / 60.)

    return sampler
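
The `parse_config` helper used throughout these examples is not shown (the snippets also import from the barak package, which presumably provides it). Judging from the calls above and below — it accepts a filename or a file-like object, takes an optional defaults mapping, and returns an object supporting both `opt.key` and `opt['key']` access — a minimal hypothetical sketch might look like this; the names and behaviour are inferred, and the real implementation may differ:

class adict(dict):
    # dict allowing attribute-style access, as used in the examples below
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

def parse_config(filename, defaults=None):
    """Read 'key = value' lines, skipping blanks and '#' comments.

    Hypothetical sketch: `filename` may be a path or an open file-like
    object (e.g. StringIO); values are kept as strings apart from the
    literals None/True/False; entries override any given defaults.
    """
    cfg = adict(defaults or {})
    fh = open(filename) if isinstance(filename, str) else filename
    for row in fh:
        row = row.strip()
        if not row or row.startswith('#'):
            continue
        key, _, val = row.partition('=')
        val = val.strip()
        cfg[key.strip()] = {'None': None, 'True': True,
                            'False': False}.get(val, val)
    if isinstance(filename, str):
        fh.close()
    return cfg
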
Example #2
def main(args=None):

    path = os.path.abspath(__file__).rsplit('/', 1)[0]
    defaults = parse_config(path + '/default.cfg')
    opt = parse_config('model.cfg', defaults)
    print '### Read parameters from model.cfg ###'

    print 'model parameters', P['names']
    print 'minimum allowed values', P['min']
    print 'maximum allowed values', P['max']

    Npar = len(P['names'])

    print opt.Nthreads, 'threads'
    print opt.Nwalkers, 'walkers'

    sampler = emcee.EnsembleSampler(
        opt.Nwalkers, Npar, ln_likelihood, threads=opt.Nthreads)

    if opt.Nburn > 0:
        t1 = time.time()
        p0 = get_initial_positions(opt.Nwalkers)
        run_burn_in(sampler, opt, p0)
        print '%.2g min elapsed' % ((time.time() - t1)/60.)

    if opt.Nmcmc > 0:
        t1 = time.time()
        run_mcmc(sampler, opt)
        print '%.2g min elapsed' % ((time.time() - t1)/60.)

    return sampler
Example #3
File: cloudy.py Project: nhmc/H2
def read_config(name):
    cfg = parse_config(name, defaults=cfg_defaults)
    cfg.overwrite = bool(cfg.overwrite)
    cfg.nproc = int(cfg.nproc)
    cfg.z = float(cfg.z)
    for k in 'logNHI lognH logZ'.split():
        vmin, vmax, step = map(float, cfg[k].split()) 
        cfg[k] = np.arange(vmin, vmax + 0.5*step, step)
    return cfg
Example #4
def read_config(name):
    """ read the configuration file, doing some extra processing
    beyond that done by parse_config().
    """
    cfg = parse_config(name, defaults=cfg_defaults)
    cfg.overwrite = bool(cfg.overwrite)
    cfg.nproc = int(cfg.nproc)
    cfg.z = float(cfg.z)
    for k in 'logNHI lognH logZ'.split():
        vmin, vmax, step = map(float, cfg[k].split()) 
        cfg[k] = np.arange(vmin, vmax + 0.5*step, step)

    return cfg
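
For instance, the `logNHI = 14.0 19.0 1.0` entry in the example grid.cfg shown further down expands into an inclusive grid of values:

>>> import numpy as np
>>> vmin, vmax, step = map(float, '14.0 19.0 1.0'.split())
>>> # adding 0.5*step to the stop value makes np.arange include vmax itself
>>> np.arange(vmin, vmax + 0.5 * step, step)
array([14., 15., 16., 17., 18., 19.])
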
Example #5
def read_observed(filename):
    """ Read a config-style file with ions and column densities.

    Each ion entry has up to three values: the first is log10 of N (cm^-2),
    the second and third are the lower and upper errors. Blank lines and
    lines starting with '#' are ignored.

    An example file:

    # column densities for the component showing D, to be compared to
    # cloudy models.

    # logN, logN sigma down, logN sigma up (same as down if not given).

    # if sigma down is 0, then the first value is a lower limit. If sigma
    # up is 0, then the first value is an upper limit.

    # Errors are 1 sigma from vpfit
    HI =  14.88 0.01
    AlIII = 10.79  0.38
    AlII  = 10.97  0.16
    CII  =  12.04  0.15
    MgII =  11.33  0.17
    SiIV =  12.495 0.012
    CIV =   13.369 0.006

    # lower limit (saturated). We assume the upper limit is equal
    # to NHI
    CIII = 13.0   0       5

    # upper limits, either blends or non-detections
    SiII = 11.77   5      0
    SiIII = 12.665 5      0
    OI   = 12.407  5      0
    NII   = 13.283 5      0
    """
    obs = parse_config(filename)
    for k in obs:
        # don't alter entries that are used for the priors when fitting
        # with emcee
        if k.startswith('min ') or k.startswith('max '):
            continue
        vals = map(float, obs[k].split())
        if len(vals) == 2:
            obs[k] = vals[0], vals[1], vals[1]
        elif len(vals) == 3:
            obs[k] = tuple(vals)
        else:
            raise ValueError('Error parsing entry %s' % obs[k])

    return obs
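
Given the example file in the docstring, each returned entry is a (logN, sigma down, sigma up) tuple; the filename here is hypothetical:

obs = read_observed('observed.cfg')  # hypothetical filename
print obs['HI']      # (14.88, 0.01, 0.01)  sigma up copied from sigma down
print obs['CIII']    # (13.0, 0.0, 5.0)     sigma down == 0: a lower limit
print obs['SiII']    # (11.77, 5.0, 0.0)    sigma up == 0: an upper limit
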
Example #6
def read_observed(filename):
    """ Read a config-style file with ions and column densities.

    Each ion entry has up to three values: the first is log10 of N (cm^-2),
    the second and third are the lower and upper errors. Blank lines and
    lines starting with '#' are ignored.

    An example file:

    # column densities for the component showing D, to be compared to
    # cloudy models.

    # logN, logN sigma down, logN sigma up (same as down if not given).

    # if sigma down is 0, then the first value is a lower limit. If sigma
    # up is 0, then the first value is an upper limit.

    # Errors are 1 sigma from vpfit
    HI =  14.88 0.01
    AlIII = 10.79  0.38
    AlII  = 10.97  0.16
    CII  =  12.04  0.15
    MgII =  11.33  0.17
    SiIV =  12.495 0.012
    CIV =   13.369 0.006

    # lower limit (saturated). We assume the upper limit is equal
    # to NHI
    CIII = 13.0   0       5

    # upper limits, either blends or non-detections
    SiII = 11.77   5      0
    SiIII = 12.665 5      0
    OI   = 12.407  5      0
    NII   = 13.283 5      0
    """
    obs = parse_config(filename)
    for k in obs:
        # skip entries that are used for the priors when fitting
        # with emcee
        if k.startswith('min ') or k.startswith('max '):
            continue
        vals = map(float, obs[k].split())
        if len(vals) == 2:
            obs[k] = vals[0], vals[1], vals[1]
        elif len(vals) == 3:
            obs[k] = tuple(vals)
        else:
            raise ValueError('Error parsing entry %s' % obs[k])

    return obs
Example #7
def read_observed(filename):
    """ Read a config-style file with ions and column densities.

    Each ion entry has up to three values: the first is log10 of N (cm^-2),
    the second and third are the lower and upper errors.
    """
    obs = parse_config(filename)
    for k in obs:
        vals = map(float, obs[k].split())
        if len(vals) == 2:
            obs[k] = vals[0], vals[1], vals[1]
        elif len(vals) == 3:
            obs[k] = tuple(vals)
        else:
            raise ValueError('Error parsing entry %s' % obs[k])

    return obs
Example #8
def make_interpolators_uvbtilt(trans, simnames):
    """ Make interpolators including different UV slopes, given by the
    simulation names.

    simname naming scheme should be (uvb_k00, uvb_k01, uvb_k02, ...),

    uvb k values must be sorted in ascending order!
    """

    Models = []
    aUV = []
    for simname in simnames:
        # need to define prefix, SIMNAME
        gridname = os.path.join(simname, 'grid.cfg')

        print 'Reading', gridname
        cfg = parse_config(gridname)
        aUV.append(cfg.uvb_tilt)

        name = os.path.join(simname, cfg.prefix + '_grid.sav.gz')
        print 'Reading', name
        M = loadobj(name)
        M = adict(M)

        Uconst = (M.U + M.nH)[0]
        print 'Uconst', Uconst, cfg.uvb_tilt
        assert np.allclose(Uconst, M.U + M.nH)
        Models.append(M)

    ##########################################################################
    # Interpolate cloudy grids onto a finer scale for plotting and
    # likelihood calculation
    ##########################################################################

    roman_map = {
        'I': 0,
        'II': 1,
        'III': 2,
        'IV': 3,
        'V': 4,
        'VI': 5,
        'VII': 6,
        'VIII': 7,
        'IX': 8,
        'X': 9,
        '2': 2
    }
    Ncloudy = {}
    Ncloudy_raw = {}
    print 'Interpolating...'
    for tr in trans + ['NH']:
        shape = len(M.NHI), len(M.nH), len(M.Z), len(aUV)
        Nvals = np.zeros(shape)
        if tr in ['CII*']:
            for i, M in enumerate(Models):
                Nvals[:, :, :, i] = M.Nex[tr][:, :, :]
        elif tr == 'NH':
            for i, M in enumerate(Models):
                logNHI = M.N['H'][:, :, :, 0]
                logNHII = M.N['H'][:, :, :, 1]
                logNHtot = np.log10(10**logNHI + 10**logNHII)
                Nvals[:, :, :, i] = logNHtot
        else:
            atom, stage = split_trans_name(tr)
            ind = roman_map[stage]
            for i, M in enumerate(Models):
                Nvals[:, :, :, i] = M.N[atom][:, :, :, ind]

        # use ndimage.map_coordinates (which is spline interpolation)
        coord = M.NHI, M.nH, M.Z, aUV
        try:
            Ncloudy[tr] = MapCoord_Interpolator(Nvals, coord)
        except:
            import pdb
            pdb.set_trace()

        Ncloudy_raw[tr] = Nvals

    print 'done'
    return Ncloudy, Ncloudy_raw, Models, np.array(aUV, np.float)
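
MapCoord_Interpolator is not defined in this snippet; the comment above says it uses ndimage.map_coordinates, so it presumably converts physical coordinates into fractional array indices before spline-interpolating. A rough sketch, assuming evenly spaced, ascending axis values:

import numpy as np
from scipy import ndimage

class MapCoord_Interpolator(object):
    """Hypothetical sketch of an N-d grid interpolator built on
    scipy.ndimage.map_coordinates (spline interpolation).

    `coord` holds one 1-d array of axis values per grid dimension,
    matching the (M.NHI, M.nH, M.Z, aUV) tuple above.
    """
    def __init__(self, vals, coord):
        self.vals = np.asarray(vals)
        self.coord = [np.asarray(c, dtype=float) for c in coord]

    def __call__(self, *pos):
        # map each physical coordinate onto a fractional array index,
        # assuming each axis is evenly spaced
        ind = [(np.atleast_1d(p) - c[0]) / (c[1] - c[0])
               for p, c in zip(pos, self.coord)]
        return ndimage.map_coordinates(self.vals, np.array(ind), order=3)
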
Example #9
# j2u  : 2 sigma upper level (joint) 
# ml   : maximum likelihood value
# med  : median value
"""
    from barak.io import writetxt
    writetxt('fig/pars.txt', rec, header=hd, fmt_float='.4g', overwrite=1)


if 1:
    ##################################################
    # Read configuration file, set global variables
    ##################################################
    cfgname = 'model.cfg'
    # we only need the cfg file for the prefix of the cloudy runs and
    # the name of the file with the observed column densities.
    opt = parse_config(cfgname)

    testing = 0

    MIN_SIG = float(opt['min_sig'])
    SIG_LIMIT = 0.05
    # H is an honorary alpha element here; it just means no offset is
    # added.
    ALPHA_ELEMENTS = 'Si O Mg S Ca Ne Ti H'.split()
    FEPEAK_ELEMENTS = 'Fe Cr Mn Co Ni'.split()

    simnames = sorted(glob(opt['simname']))
    assert len(simnames) > 0

if 1:
    ##################################################
Example #10
def process_options(args):
    opt = adict()
    filename = os.path.abspath(__file__).rsplit('/', 1)[0] + '/default.cfg'
    opt = parse_config(filename)
    if os.path.lexists('./plot.cfg'):
        opt = parse_config('./plot.cfg', opt)

    opt.atom = readatom(molecules=True)

    if opt.Rfwhm is not None:
        if isinstance(opt.Rfwhm, basestring):
            if opt.Rfwhm == 'convolve_with_COS_FOS':
                if convolve_with_COS_FOS is None:
                    raise ValueError('convolve_with_COS_FOS() not available')
                print('Using tailored FWHM for COS/FOS data')
                opt.Rfwhm = 'convolve_with_COS_FOS'
            elif opt.Rfwhm.endswith('fits'):
                print('Reading Resolution FWHM from', opt.Rfwhm)
                res = readtabfits(opt.Rfwhm)
                opt.Rfwhm = res.res / 2.354
            else:
                print('Reading Resolution FWHM from', opt.Rfwhm)
                fh = open(opt.Rfwhm)
                opt.Rfwhm = 1 / 2.354 * np.array([float(r) for r in fh])
                fh.close()
        else:
            opt.Rfwhm = float(opt.Rfwhm)

    if opt.features is not None:
        print('Reading feature list from', opt.features)
        opt.features = readtabfits(opt.features)

    if opt.f26 is not None:
        name = opt.f26
        print('Reading ions and fitting regions from', name)
        opt.f26 = readf26(name)
        opt.f26.filename = name

    if opt.transitions is not None:
        print('Reading transitions from', opt.transitions)
        fh = open(opt.transitions)
        trans = list(fh)
        fh.close()
        temp = []
        for tr in trans:
            tr = tr.strip()
            if tr and not tr.startswith('#'):
                junk = tr.split()
                tr = junk[0] + ' ' + junk[1]
                t = findtrans(tr, atomdat=opt.atom)
                temp.append(dict(name=t[0], wa=t[1][0], tr=t[1]))
        opt.linelist = temp
    else:
        opt.linelist = readtxt(get_data_path() + 'linelists/qsoabs_lines',
                               names='wa,name,select')

    if opt.f26 is None and opt.taulines is not None:
        print('Reading ions from', opt.taulines)
        fh = open(opt.taulines)
        lines = []
        for row in fh:
            if row.lstrip().startswith('#'):
                continue
            items = row.split()
            lines.append([items[0]] + list(map(float, items[1:])))
        fh.close()
        opt.lines = lines

    if opt.show_regions is None:
        opt.show_regions = True

    if hasattr(opt, 'aodname'):
        opt.aod = Table.read(opt.aodname)

    return opt
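
The options here come from the packaged default.cfg, optionally overridden by a plot.cfg in the working directory. A hypothetical plot.cfg exercising some of the keys handled above might contain (key names are those the function checks; the values are illustrative only):

# hypothetical plot.cfg
Rfwhm = 6.6                  # a number, a .fits filename, or
                             # convolve_with_COS_FOS
f26 = all.f26                # ions and fitting regions
transitions = transitions/general
show_regions = True
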
Example #11
def main(args):
    path = os.path.abspath(__file__).rsplit("/", 1)[0]
    defaults = parse_config(path + "/default.cfg")
    opt = parse_config("model.cfg", defaults)
    print pprint.pformat(opt)
    print "### Read parameters from model.cfg ###"

    filename, = args
    samples = loadobj(filename)

    mean_accept = samples["accept"].mean()
    print "Mean acceptance fraction", mean_accept
    nwalkers, nsamples, npar = samples["chain"].shape

    if not os.path.lexists("fig/"):
        os.mkdir("fig")

    if filename.startswith("samples_burn"):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples["lnprob"].ravel().argmax()
        P["ml"] = samples["chain"].reshape(-1, npar)[i]

        print "Plotting burn-in sample posteriors"
        # bins for plotting posterior histograms
        P["bins"] = [np.linspace(lo, hi, opt.Nhistbins) for lo, hi in zip(P["min"], P["max"])]

        fig, axes = plot_posteriors_burn(samples["chain"], P, npar=opt.npar)
        fig.suptitle("%i samples of %i walkers" % (nsamples, nwalkers), fontsize=14)
        fig.savefig("fig/posterior_burnin." + opt.plotformat)

        print "Plotting traces"
        fig, nwplot = plot_trace(samples["chain"])
        fig.suptitle("Chain traces for %i of %i walkers" % (nwplot, nwalkers))
        fig.savefig("fig/traces." + opt.plotformat)

        if opt.autocorr:
            print "Plotting autocorrelation"
            fig, axes = plot_autocorr(samples["chain"])
            fig.suptitle(
                "Autocorrelation for %i walkers with %i samples. "
                "(Mean acceptance fraction %.2f)" % (nwalkers, nsamples, mean_accept),
                fontsize=14,
            )
            fig.savefig("fig/autocorr." + opt.plotformat)

    else:
        # make a chain of independent samples
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples
        chain = samples["chain"][:, 0 : Ns * Nt : Nt, :].reshape(-1, npar)

        # bins for plotting posterior histograms
        P["bins"] = []
        for i in xrange(len(P["names"])):
            x0, x1 = chain[:, i].min(), chain[:, i].max()
            dx = x1 - x0
            lo = x0 - 0.1 * dx
            hi = x1 + 0.1 * dx
            P["bins"].append(np.linspace(lo, hi, opt.Nhistbins))

        levels = 0.6827, 0.9545
        P["p1sig"] = [find_min_interval(chain[:, i], levels[0]) for i in range(npar)]
        P["p2sig"] = [find_min_interval(chain[:, i], levels[1]) for i in range(npar)]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        lnprob = samples["lnprob"][:, 0 : Ns * Nt : Nt].ravel()
        isort = lnprob.argsort()
        P["ijoint_sig"] = [isort[int((1 - l) * len(lnprob)) :] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters.
        P["p1sig_joint"] = []
        P["p2sig_joint"] = []
        for i in range(npar):
            lo = chain[P["ijoint_sig"][0], i].min()
            hi = chain[P["ijoint_sig"][0], i].max()
            P["p1sig_joint"].append((lo, hi))
            lo = chain[P["ijoint_sig"][1], i].min()
            hi = chain[P["ijoint_sig"][1], i].max()
            P["p2sig_joint"].append((lo, hi))

        P["median"] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples["lnprob"].ravel().argmax()
        P["ml"] = samples["chain"].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            if not scipy:
                raise ImportError("Scipy minimize not available")
            print "Finding maximum likelihood parameter values"
            P["ml"] = minimize(lambda *x: -ln_likelihood(*x), P["ml"])
            print "done"

        if opt.plotposteriors:
            print "Plotting sample posteriors"
            fig, axes = plot_posteriors(chain, P, nplot=opt.npar)
            fig.suptitle("%i of %i samples, %i walkers, thinning %i" % (Ns, nsamples, nwalkers, Nt), fontsize=14)
            fig.savefig("fig/posterior_mcmc." + opt.plotformat, dpi=200)

    if opt.plotdata:
        print "Plotting the maximum likelihood model and data"
        from model import plot_model

        if opt.nsamp_plot > 1:
            chain = samples["chain"].reshape(-1, npar)
            step = int(len(chain) / opt.nsamp_plot)
            samp = chain[::step]
            fig = plot_model(samp)
        else:
            fig = plot_model([P["median"]])

    if opt.printpar and not filename.startswith("samples_burn"):
        from model import print_par

        print_par(P)

    if opt.display:
        print "Displaying..."
        pl.show()

    print "Done!"
Example #12
             'HI 914': [(150+vzero, 200+vzero), (260+vzero, 400+vzero)],
             'MgII 2796': [(-280,-200),(60,200)],
             'MgII 2803': [(-300+vzero,-130+vzero),(250+vzero,305+vzero)],
             'FeIII 1122': [(-300+vzero,-100+vzero),(155+vzero,400+vzero)],
             'SiIII 1206': [(-300+vzero,-95+vzero),(-70+vzero,10+vzero),
                            (115+vzero,195+vzero), (200+vzero,220+vzero),
                            (240+vzero,400+vzero)],
             'CIII 977': [(-190+vzero,-155+vzero), (0+vzero,400+vzero),],
             'CII 1334': [(0,300)],
             'SiII 1526': [(60,300)],
             'NV 1238': [(90+vzero,200+vzero),(280+vzero, 403+vzero)],
             'OVI 1031': [(-500,-360),(-100, -20),(45,200)],
             'OVI 1037': [(-500,-290), (-100, +45), (70,200)],
             }

cfg = parse_config(config)
transitions = read_transitions(cfg.trfilename, ATOMDAT)

if 1:
    sp = barak.spec.read(cfg.spfilename)
    ndiv = 4.
    wa_dv = make_constant_dv_wa_scale(sp.wa[0], sp.wa[-1], cfg.Rfwhm / ndiv)

    expand_cont_adjustment = 5

    vp = readf26(cfg.f26name)
    lines = vp.lines[vp.lines.name != '<>']
    tau, ticks, alltau = find_tau(sp.wa, lines, ATOMDAT, per_trans=1)
    model = convolve_constant_dv(sp.wa, np.exp(-tau), wa_dv, ndiv)
    models = [convolve_constant_dv(sp.wa, np.exp(-t), wa_dv, ndiv) for t in
              alltau]
Example #13
File: cloudy.py Project: nhmc/H2
import os, sys, time, multiprocessing, gzip
from cStringIO import StringIO

cfg_temp = """\
abundances=None
table=None
run_cloudy=True
distance_starburst_kpc=None
overwrite=False
logfnu912=None
fesc=None
grains=False
constantpressure=False
"""

cfg_defaults = parse_config(StringIO(cfg_temp))

def write_example_grid_config():
    fh = open('grid.cfg', 'w')
    fh.write("""\
# Path to the CUBA file giving the UV background. 
cuba_name = /home/nhmc/code/repo/QSOClustering-svn/misc/CUBA/Q1G01/bkgthick.out
# Prefix for final grid file
prefix = qg
# Redshift for the UV background
z = 2.2
# Minimum, maximum and step for neutral hydrogen column density (cm^-2)
logNHI =  14.0  19.0 1.0
# Minimum, maximum and step for metallicity
logZ   =  -2.0  0.0  1.0
# Minimum, maximum and step for hydrogen density (cm^-3)
Example #14
spfilename = q0107c_HIRES.txt
trfilename = transitions/general
vmax = 299
wadiv = None
Rfwhm = 6.6
osc = False
residuals = True
redshift = 0.5571530371
"""

cfgname = 'velplot.cfg'

with open(cfgname, 'w') as fh:
    fh.write(cfg)

opt = parse_config(cfgname)

pl.rc('xtick',labelsize=11)
pl.rc('ytick',labelsize=11)

if 1:
    sp, transitions, model, models, ticks = make_models()
    fig, axes = make_plot(sp, transitions, model, models, [])

    mg2796 = 2796.3542699
    waobs = (opt.redshift + 1) * mg2796
    tickpos = (ticks[ticks.wa0 == mg2796].wa / waobs  - 1) * c_kms

    for i,ax in enumerate(axes):
        if i > 3:
            ax.set_yticklabels([])
Example #15
def main(args):
    path = os.path.abspath(__file__).rsplit('/', 1)[0]
    defaults = parse_config(path + '/default.cfg')
    opt = parse_config('model.cfg', defaults)
    print pprint.pformat(opt)
    print '### Read parameters from model.cfg ###'

    filename, = args
    samples = loadobj(filename)

    mean_accept = samples['accept'].mean()
    print 'Mean acceptance fraction', mean_accept
    nwalkers, nsamples, npar = samples['chain'].shape

    if not os.path.lexists('fig/'):
        os.mkdir('fig')

    if filename.startswith('samples_burn'):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        print 'Plotting burn-in sample posteriors'
        # bins for plotting posterior histograms
        P['bins'] = [
            np.linspace(lo, hi, opt.Nhistbins)
            for lo, hi in zip(P['min'], P['max'])
        ]

        fig, axes = plot_posteriors_burn(samples['chain'], P, npar=opt.npar)
        fig.suptitle('%i samples of %i walkers' % (nsamples, nwalkers),
                     fontsize=14)
        fig.savefig('fig/posterior_burnin.' + opt.plotformat)

        print 'Plotting traces'
        fig, nwplot = plot_trace(samples['chain'])
        fig.suptitle('Chain traces for %i of %i walkers' % (nwplot, nwalkers))
        fig.savefig('fig/traces.' + opt.plotformat)

        if opt.autocorr:
            print 'Plotting autocorrelation'
            fig, axes = plot_autocorr(samples['chain'])
            fig.suptitle('Autocorrelation for %i walkers with %i samples. '
                         '(Mean acceptance fraction %.2f)' %
                         (nwalkers, nsamples, mean_accept),
                         fontsize=14)
            fig.savefig('fig/autocorr.' + opt.plotformat)

    else:
        # make a chain of independent samples
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples
        chain = samples['chain'][:, 0:Ns * Nt:Nt, :].reshape(-1, npar)

        # bins for plotting posterior histograms
        P['bins'] = []
        for i in xrange(len(P['names'])):
            x0, x1 = chain[:, i].min(), chain[:, i].max()
            dx = x1 - x0
            lo = x0 - 0.1 * dx
            hi = x1 + 0.1 * dx
            P['bins'].append(np.linspace(lo, hi, opt.Nhistbins))

        levels = 0.6827, 0.9545
        P['p1sig'] = [
            find_min_interval(chain[:, i], levels[0]) for i in range(npar)
        ]
        P['p2sig'] = [
            find_min_interval(chain[:, i], levels[1]) for i in range(npar)
        ]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        lnprob = samples['lnprob'][:, 0:Ns * Nt:Nt].ravel()
        isort = lnprob.argsort()
        P['ijoint_sig'] = [isort[int((1 - l) * len(lnprob)):] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters.
        P['p1sig_joint'] = []
        P['p2sig_joint'] = []
        for i in range(npar):
            lo = chain[P['ijoint_sig'][0], i].min()
            hi = chain[P['ijoint_sig'][0], i].max()
            P['p1sig_joint'].append((lo, hi))
            lo = chain[P['ijoint_sig'][1], i].min()
            hi = chain[P['ijoint_sig'][1], i].max()
            P['p2sig_joint'].append((lo, hi))

        P['median'] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            if not scipy:
                raise ImportError('Scipy minimize not available')
            print 'Finding maximum likelihood parameter values'
            P['ml'] = minimize(lambda *x: -ln_likelihood(*x), P['ml'])
            print 'done'

        if opt.plotposteriors:
            print 'Plotting sample posteriors'
            fig, axes = plot_posteriors(chain, P, nplot=opt.npar)
            fig.suptitle('%i of %i samples, %i walkers, thinning %i' %
                         (Ns, nsamples, nwalkers, Nt),
                         fontsize=14)
            fig.savefig('fig/posterior_mcmc.' + opt.plotformat, dpi=200)

    if opt.plotdata:
        print 'Plotting the maximum likelihood model and data'
        from model import plot_model
        if opt.nsamp_plot > 1:
            chain = samples['chain'].reshape(-1, npar)
            step = int(len(chain) / opt.nsamp_plot)
            samp = chain[::step]
            fig = plot_model(samp)
        else:
            fig = plot_model([P['median']])

    if opt.printpar and not filename.startswith('samples_burn'):
        from model import print_par
        print_par(P)

    if opt.display:
        print 'Displaying...'
        pl.show()

    print 'Done!'
Example #16
cfg_temp = """\
abundances=None
table=None
cuba_name=None
run_cloudy=True
uvb_tilt=None
distance_starburst_kpc=None
overwrite=False
logfnu912=None
fesc=None
grains=False
constantpressure=False
"""

cfg_defaults = parse_config(StringIO(cfg_temp))

def write_example_grid_config():
    fh = open('grid.cfg', 'w')
    fh.write("""\
# Path to the CUBA file giving the UV background. 
cuba_name = /home/nhmc/code/repo/QSOClustering-svn/misc/CUBA/Q1G01/bkgthick.out
# Prefix for final grid file
prefix = qg
# Redshift for the UV background
z = 2.2
# Minimum, maximum and step for neutral hydrogen column density (cm^-2)
logNHI =   14.0  19.0 1.0
# Minimum, maximum and step for metallicity
logZ   =   -2.0  0.0  1.0
# Minimum, maximum and step for hydrogen density (cm^-3)
Example #17
    'FeIII 1122': [(-300 + vzero, -100 + vzero), (155 + vzero, 400 + vzero)],
    'SiIII 1206': [(-300 + vzero, -95 + vzero), (-70 + vzero, 10 + vzero),
                   (115 + vzero, 195 + vzero), (200 + vzero, 220 + vzero),
                   (240 + vzero, 400 + vzero)],
    'CIII 977': [
        (-190 + vzero, -155 + vzero),
        (0 + vzero, 400 + vzero),
    ],
    'CII 1334': [(0, 300)],
    'SiII 1526': [(60, 300)],
    'NV 1238': [(90 + vzero, 200 + vzero), (280 + vzero, 403 + vzero)],
    'OVI 1031': [(-500, -360), (-100, -20), (45, 200)],
    'OVI 1037': [(-500, -290), (-100, +45), (70, 200)],
}

cfg = parse_config(config)
transitions = read_transitions(cfg.trfilename, ATOMDAT)

if 1:
    sp = barak.spec.read(cfg.spfilename)
    ndiv = 4.
    wa_dv = make_constant_dv_wa_scale(sp.wa[0], sp.wa[-1], cfg.Rfwhm / ndiv)

    expand_cont_adjustment = 5

    vp = readf26(cfg.f26name)
    lines = vp.lines[vp.lines.name != '<>']
    tau, ticks, alltau = find_tau(sp.wa, lines, ATOMDAT, per_trans=1)
    model = convolve_constant_dv(sp.wa, np.exp(-tau), wa_dv, ndiv)
    models = [
        convolve_constant_dv(sp.wa, np.exp(-t), wa_dv, ndiv) for t in alltau
Example #18
File: cloudy.py Project: nhmc/H2
def main():
    if not os.path.lexists('grid.cfg'):
        print ('./grid.cfg file not found, writing an example grid.cfg to '
               'the current directory')
        write_example_grid_config()
        sys.exit()

    cfg = parse_config('grid.cfg', defaults=cfg_defaults)
    cfg.overwrite = bool(cfg.overwrite)
    cfg.nproc = int(cfg.nproc)
    cfg.z = float(cfg.z)
    for k in 'logNHI lognH logZ'.split():
        vmin, vmax, step = map(float, cfg[k].split()) 
        cfg[k] = np.arange(vmin, vmax + 0.5*step, step) 
    
    print ''
    print 'Input values:'
    for k in sorted(cfg):
        print '  %s: %s' % (k, cfg[k])
    print ''

    if cfg.table is None:
        fluxname = cfg.prefix + '_temp_uvb.dat'
        uvb = calc_uvb(cfg.z, cfg.cuba_name, match_fg=True)
        writetable('cloudy_jnu_HM.tbl', [uvb['energy'], uvb['logjnu']],
                   overwrite=1,
                   units=['Rydbergs', 'log10(erg/s/cm^2/Hz/ster)'],
                   names=['energy', 'jnu'])

        if cfg.distance_starburst_kpc is not None:
            wa, F = read_starburst99('starburst.spectrum1')
            nu, logjnu = calc_local_jnu(wa, F, cfg.distance_starburst_kpc,
                                        cfg.fesc)
            energy = nu * hplanck / Ryd
            # use HM uvb energy limits
            cond = between(uvb['energy'], energy[0], energy[-1])
            logjnu1 = np.interp(uvb['energy'][cond], energy, logjnu)
            uvb['logjnu'][cond] = np.log10(10**uvb['logjnu'][cond] +
                                           10**logjnu1)
            writetable('cloudy_jnu_total.tbl', [uvb['energy'], uvb['logjnu']],
                       overwrite=1,
                       units=['Rydbergs', 'log10(erg/s/cm^2/Hz/ster)'],
                       names=['energy', 'jnu'])

        write_uvb(fluxname, uvb['energy'], uvb['logjnu'], cfg.overwrite)

        # Fnu at 1 Rydberg
        k = np.argmin(np.abs(uvb['energy'] - 1.))
        logfnu912 = np.log10(10**uvb['logjnu'][k] * 4 * pi)
    else:
        logfnu912 = cfg.logfnu912
        fluxname = None

    write_grid_input(cfg, fnu912=logfnu912, fluxfilename=fluxname,
                     table=cfg.table, abundances=cfg.abundances)

    if cfg.run_cloudy:
        run_grid(nproc=cfg.nproc, overwrite=cfg.overwrite)

    models = parse_grid(cfg)

    filename = cfg.prefix + '_grid.sav.gz'
    print 'Writing to', filename
    saveobj(filename, models, overwrite=cfg.overwrite)
Example #19
File: model.py Project: nhmc/LAE
def make_interpolators_uvbtilt(trans, simnames):
    """ Make interpolators including different UV slopes, given by the
    simulation names.

    simname naming scheme should be (uvb_k00, uvb_k01, uvb_k02, ...),

    uvb k values must be sorted in ascending order!
    """

    Models = []
    aUV = []
    for simname in simnames:
        # need to define prefix, SIMNAME
        gridname = os.path.join(simname, 'grid.cfg')
    
        print 'Reading', gridname
        cfg = parse_config(gridname)
        aUV.append(cfg.uvb_tilt)
    
        name = os.path.join(simname, cfg.prefix + '_grid.sav.gz')
        print 'Reading', name
        M = loadobj(name)
        M = adict(M)

        Uconst = (M.U + M.nH)[0]
        print 'Uconst', Uconst, cfg.uvb_tilt
        assert np.allclose(Uconst, M.U + M.nH)
        Models.append(M)

    ##########################################################################
    # Interpolate cloudy grids onto a finer scale for plotting and
    # likelihood calculation
    ##########################################################################

    roman_map = {'I':0, 'II':1, 'III':2, 'IV':3, 'V':4, 'VI':5,
                 'VII':6, 'VIII':7, 'IX':8, 'X':9, '2':2}
    Ncloudy = {}
    Ncloudy_raw = {}
    print 'Interpolating...'
    for tr in trans + ['NH']:
        shape = len(M.NHI), len(M.nH), len(M.Z), len(aUV)
        Nvals = np.zeros(shape)
        if tr in ['CII*']:
            for i,M in enumerate(Models):
                Nvals[:,:,:,i] = M.Nex[tr][:,:,:]
        elif tr == 'NH':
            for i,M in enumerate(Models):
                logNHI = M.N['H'][:,:,:,0]
                logNHII = M.N['H'][:,:,:,1]
                logNHtot = np.log10(10**logNHI + 10**logNHII)
                Nvals[:,:,:,i] = logNHtot            
        else:
            atom, stage = split_trans_name(tr)
            ind = roman_map[stage]
            for i,M in enumerate(Models):
                Nvals[:,:,:,i] = M.N[atom][:,:,:,ind]

        # use ndimage.map_coordinates (which is spline interpolation)
        coord = M.NHI, M.nH, M.Z, aUV
        try:
            Ncloudy[tr] = MapCoord_Interpolator(Nvals, coord)
        except:
            import pdb; pdb.set_trace()

        Ncloudy_raw[tr] = Nvals

    print 'done'
    return Ncloudy, Ncloudy_raw, Models, np.array(aUV, np.float)
Example #20
    ax.set_xlim(grid.nH[0]+0.01, grid.nH[-1]-0.01)

    ax1 = ax.twiny()
    x0,x1 = ax.get_xlim()
    const = (grid.U + grid.nH)[0]
    assert np.allclose(const, grid.U + grid.nH)
    ax1.set_xlim(const - x0, const - x1)
    ax1.set_xlabel('$\log_{10}\ U$')


if 1:
    ##############################################
    # Read the model
    ##############################################
    
    cfg = parse_config(gridname)

    M = loadobj(os.path.join(prefix, simname, cfg.prefix + '_grid.sav.gz'))
    M = adict(M)

    # A finer grid of parameter values for interpolation below
    NHI = np.linspace(M.NHI[0], M.NHI[-1], 100)
    nH = np.linspace(M.nH[0], M.nH[-1], 101)
    Z = np.linspace(M.Z[0], M.Z[-1], 102)

    dNHI = NHI[1] - NHI[0]
    dnH = nH[1] - nH[0]
    dZ = Z[1] - Z[0]


if 1:
Example #21
def main(args):
    path = os.path.abspath(__file__).rsplit('/', 1)[0]
    defaults = parse_config(path + '/default.cfg')
    opt = parse_config('model.cfg', defaults)
    print pprint.pformat(opt)
    print '### Read parameters from model.cfg ###'

    filename, = args
    samples = loadobj(filename)

    mean_accept = samples['accept'].mean()
    print 'Mean acceptance fraction', mean_accept
    nwalkers, nsamples, npar = samples['chain'].shape

    if not os.path.lexists('fig/'):
        os.mkdir('fig')

    if filename.startswith('samples_burn'):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        print 'Plotting burn-in sample posteriors'
        # bins for plotting posterior histograms
        P['bins'] = [np.linspace(lo, hi, opt.Nhistbins) for
                     lo,hi in zip(P['min'], P['max'])]

        fig,axes = plot_posteriors_burn(samples['chain'], P, npar=opt.npar)
        fig.suptitle('%i samples of %i walkers' % (
            nsamples, nwalkers), fontsize=14)
        fig.savefig('fig/posterior_burnin.' + opt.plotformat)
        
        print 'Plotting traces'
        fig, nwplot = plot_trace(samples['chain'])
        fig.suptitle('Chain traces for %i of %i walkers' % (nwplot,nwalkers))
        fig.savefig('fig/traces.' + opt.plotformat)

        if opt.autocorr:
            print 'Plotting autocorrelation'
            fig, axes = plot_autocorr(samples['chain'])
            fig.suptitle('Autocorrelation for %i walkers with %i samples. '
                         '(Mean acceptance fraction %.2f)' %
                         (nwalkers, nsamples, mean_accept), fontsize=14)
            fig.savefig('fig/autocorr.' + opt.plotformat)

    else:
        # make a chain of independent samples
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples 
        chain = samples['chain'][:,0:Ns*Nt:Nt,:].reshape(-1, npar)


        # bins for plotting posterior histograms
        P['bins'] = []
        for i in xrange(len(P['names'])):
            x0, x1 = chain[:,i].min(), chain[:,i].max()
            dx = x1 - x0
            lo = x0 - 0.1*dx
            hi = x1 + 0.1*dx
            P['bins'].append( np.linspace(lo, hi, opt.Nhistbins) )


        levels = 0.6827, 0.9545
        P['p1sig'] = [find_min_interval(chain[:, i], levels[0]) for i
                      in range(npar)]
        P['p2sig'] = [find_min_interval(chain[:, i], levels[1]) for i
                      in range(npar)]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        lnprob = samples['lnprob'][:,0:Ns*Nt:Nt].ravel()
        isort = lnprob.argsort()
        P['ijoint_sig'] = [isort[int((1-l)*len(lnprob)):] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters.
        P['p1sig_joint'] = []
        P['p2sig_joint'] = []
        for i in range(npar):
            lo = chain[P['ijoint_sig'][0], i].min()
            hi = chain[P['ijoint_sig'][0], i].max() 
            P['p1sig_joint'].append((lo, hi))
            lo = chain[P['ijoint_sig'][1], i].min()
            hi = chain[P['ijoint_sig'][1], i].max()
            P['p2sig_joint'].append((lo, hi))

        P['median'] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            if not scipy:
                raise ImportError('Scipy minimize not available')
            print 'Finding maximum likelihood parameter values'
            P['ml'] = minimize(lambda *x: -ln_likelihood(*x), P['ml'])
            print 'done'

        if opt.plotposteriors:
            print 'Plotting sample posteriors'
            fig, axes = plot_posteriors(chain, P, npar=opt.npar)
            fig.suptitle('%i of %i samples, %i walkers, thinning %i' % (
                Ns, nsamples, nwalkers, Nt), fontsize=14)
            fig.savefig('fig/posterior_mcmc.' + opt.plotformat)

    if opt.plotdata:
        print 'Plotting the maximum likelihood model and data'
        from model import plot_model
        fig = plot_model(P['ml'])
        fig.savefig('fig/model.' + opt.plotformat)

    if opt.printpar and not filename.startswith('samples_burn'):
        from model import print_par
        print_par(P)

    if opt.display:
        print 'Displaying...'
        pl.show()

    print 'Done!'
Example #22
def process_options(args):
    opt = adict()
    filename = os.path.abspath(__file__).rsplit('/', 1)[0] + '/default.cfg'
    opt = parse_config(filename)
    if os.path.lexists('./plot.cfg'):
        opt = parse_config('./plot.cfg', opt)

    opt.atom = readatom(molecules=True)

    if opt.Rfwhm is not None:
        if isinstance(opt.Rfwhm, basestring):
            if opt.Rfwhm == 'convolve_with_COS_FOS':
                if convolve_with_COS_FOS is None:
                    raise ValueError('convolve_with_COS_FOS() not available')
                print('Using tailored FWHM for COS/FOS data')
                opt.Rfwhm = 'convolve_with_COS_FOS'
            elif opt.Rfwhm.endswith('fits'):
                print('Reading Resolution FWHM from', opt.Rfwhm)
                res = readtabfits(opt.Rfwhm)
                opt.Rfwhm = res.res / 2.354
            else:
                print('Reading Resolution FWHM from', opt.Rfwhm)
                fh = open(opt.Rfwhm)
                opt.Rfwhm = 1 / 2.354 * np.array([float(r) for r in fh])
                fh.close()
        else:
            opt.Rfwhm = float(opt.Rfwhm)

    if opt.features is not None:
        print('Reading feature list from', opt.features)
        opt.features = readtabfits(opt.features)

    if opt.f26 is not None:
        name = opt.f26
        print('Reading ions and fitting regions from', name)
        opt.f26 = readf26(name)
        opt.f26.filename = name

    if opt.transitions is not None:
        print('Reading transitions from', opt.transitions)
        fh = open(opt.transitions)
        trans = list(fh)
        fh.close()
        temp = []
        for tr in trans:
            tr = tr.strip()
            if tr and not tr.startswith('#'):
                junk = tr.split()
                tr = junk[0] + ' ' + junk[1]
                t = findtrans(tr, atomdat=opt.atom)
                temp.append(dict(name=t[0], wa=t[1][0], tr=t[1]))
        opt.linelist = temp
    else:
        opt.linelist = readtxt(get_data_path() + 'linelists/qsoabs_lines',
                        names='wa,name,select')

    if opt.f26 is None and opt.taulines is not None:
        print('Reading ions from', opt.taulines)
        fh = open(opt.taulines)
        lines = []
        for row in fh:
            if row.lstrip().startswith('#'):
                continue
            items = row.split()
            lines.append([items[0]] + list(map(float, items[1:])))
        fh.close()
        opt.lines = lines

    if opt.show_regions is None:
        opt.show_regions = True

    if hasattr(opt, 'aodname'):
        opt.aod = Table.read(opt.aodname)

    return opt