Example #1
 
# Custom redshift sampling
z = np.concatenate(( [0.,],
                     np.linspace(0.001, 1., 40),
                     np.linspace(1.05, 3., 34),
                     np.linspace(3.15, 10., 25) ))
a = 1./(1.+z)  # scale factor corresponding to each redshift
pcts = [2.5, 16., 50., 84., 97.5]  # median plus 1- and 2-sigma percentiles

# Use cached percentiles if available; otherwise recompute from the chain
try:
    pct = np.load("%s.pctcache.npy" % fn)
    print("%s: Loaded percentiles from cache" % fn)
except IOError:
    # Load samples
    print("%s: Loading samples" % fn)
    dat = load_chain(fn)

    ode = []
    print(dat['h'].shape)
    #print("Walkers:", dat['walker'][1200000:1200000+10])

    # Choose random subset of samples
    #sample_idxs = range(1200000, dat['h'].size, 1)
    np.random.seed(SAMPLE_SEED)
    sample_idxs = np.random.randint(low=BURNIN, high=dat['h'].size, size=NSAMP)
    print("Selecting %d samples out of %d available (burnin %d discarded)"
          % (sample_idxs.size, dat['h'].size - BURNIN, BURNIN))

    # Calculate model for selected samples
    for i in sample_idxs:
        pp = {key: dat[key][i] for key in dat.keys()}
Example #2
#    f = open(fname, 'r')
#    hdr = f.readline()
#    f.close()
#    names = hdr[2:-1].split(' ')
#
#    # Load data
#    dat = np.genfromtxt(fname).T
#    data = {}
#    for i, n in enumerate(names):
#        data[n] = dat[i]
#    return data

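# Scatter mode: plot random posterior samples of one parameter against another (e.g. w0 vs. winf)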
if MODE == 'scatter':
    P.subplot(111)
    for i, fname in enumerate(fnames):
        dat = load_chain(fname)
        print(dat.keys())
        print(dat['w0'].shape)

        # Select random subsample
        sample_idxs = np.random.randint(low=BURNIN,
                                        high=dat['h'].size,
                                        size=NSAMP)

        # Steppyness
        #x = (dat['w0'][10000:] - dat['winf'][10000:]) / (1. + dat['zc'][10000:])
        #y = dat['logl'][10000:] #dat['zc'][10000:] # logL

        # w0, winf
        x = dat['w0'][sample_idxs]  # FIXME: 100k!
        y = dat['winf'][sample_idxs]
Example #3
    P.hist(dat['logl'], bins=80, color=colours[i], histtype='step', 
           label=labels[i], density=True, range=ranges['logl'])

P.tight_layout()
P.show()
exit()
"""


fig = None
#axes = [P.subplot(321), P.subplot(322), P.subplot(323), 
#        P.subplot(324), P.subplot(325), P.subplot(326), ]
for i, fname in enumerate(fnames):
    
    # Load data
    dat = load_chain(fname, burnin=1500000)
    print(dat.keys())
    print(dat['w0'].shape)
    
    #idxs = np.where(dat['logl'] > np.max(dat['logl']) - 5.)
    
    lbls = list(dat.keys())
    print(lbls)
    lbls.remove('logl')
    lbls.remove('walker')
    
    
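    # Stack the post-burn-in samples of each parameter into an (n_samples, n_params) array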
    data = np.array([dat[key] for key in lbls]).T
    
    """
    for j, pname in enumerate(['omegaM', 'w0', 'h', 'omegaB', 'winf', 'zc',]):
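Example #4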
#!/usr/bin/env python
"""
Plot superposed 1D marginal histograms for two experiments.
"""
import numpy as np
import matplotlib.pyplot as plt
from load_data_files import load_chain

fname1 = "chains/final_wztanh_seed200_cmb_lss"
fname2 = "chains/final_wztanh_seed200_cmb_lss_cosvis_pbw"

# Load data
d1 = load_chain(fname1)
d2 = load_chain(fname2)

# Reshape into per-walker chains
nw = np.max(d1['walker']).astype(int) + 1
for k in d1.keys():
    d1[k] = d1[k].reshape((-1, nw))
for k in d2.keys():
    d2[k] = d2[k].reshape((-1, nw))
#d2 = d2.reshape((d2.shape[0], -1, nw))
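# Each parameter array now has shape (n_steps, n_walkers)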
print(d1['walker'].shape, d2['walker'].shape)

# Create Figure
fig = plt.figure(constrained_layout=True)

gs = fig.add_gridspec(2, 3)
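# Top row of panels in the 2x3 grid of marginal histograms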
ax11 = fig.add_subplot(gs[0, 0])
ax12 = fig.add_subplot(gs[0, 1])
ax13 = fig.add_subplot(gs[0, 2])