Example 1
# get evidence
hdf = h5py.File(outpost, "r")
a = hdf["lalinference"]["lalinference_nest"]
evsig = a.attrs["log_evidence"]
evnoise = a.attrs["log_noise_evidence"]
hdf.close()

# run bilby via the pe interface
runner = pe(
    data_file=hetfile,
    par_file=parfile,
    prior=priors,
    detector=detector,
    sampler="dynesty",
    sampler_kwargs={
        "Nlive": Nlive,
        "walks": 40
    },
    outdir=outdir,
    label=label,
)

result = runner.result
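
# A minimal cross-check sketch (not from the original script): both codes
# report natural-log evidences, so their Bayes factors can be compared
# directly; log_evidence and log_noise_evidence are standard attributes of
# a bilby Result object.
lalinf_log_bf = evsig - evnoise  # lalinference ln(Bayes factor)
bilby_log_bf = result.log_evidence - result.log_noise_evidence
print("ln(BF) lalinference: {0:.3f}, bilby: {1:.3f}".format(lalinf_log_bf, bilby_log_bf))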

# evaluate the likelihood on a grid
gridpoints = 35
grid_size = dict()
for p in priors.keys():
    grid_size[p] = np.linspace(np.min(result.posterior[p]),
                               np.max(result.posterior[p]), gridpoints)
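
The per-parameter grids built above can be fed back to pe for a gridded cross-check of the sampler, following the grid=True pattern used in Examples 3 and 4. A minimal sketch, assuming the same grid_kwargs={"grid_size": ...} form as Example 4:

grunner = pe(
    data_file=hetfile,
    par_file=parfile,
    prior=priors,
    detector=detector,
    grid=True,
    grid_kwargs={"grid_size": grid_size},  # the per-parameter linspace grids built above
    outdir=outdir,
    label=label,
)
grid = grunner.grid  # gridded posterior/evidence, per the run.grid usage in Example 4
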
Example 2
# pack the lalinference posterior samples into an array ordered like the priors
# (loop header restored from the identical pattern in Example 3)
postsamples = np.zeros((lp, len(priors)))
for i, p in enumerate(priors.keys()):
    postsamples[:, i] = post[p.upper()]

# get evidence
hdf = h5py.File(outpost, "r")
a = hdf["lalinference"]["lalinference_nest"]
evsig = a.attrs["log_evidence"]
evnoise = a.attrs["log_noise_evidence"]
hdf.close()

# run bilby via the pe interface
runner = pe(
    data_file_1f=hetfiles[0],
    data_file_2f=hetfiles[1],
    par_file=parfile,
    prior=priors,
    detector=detector,
    sampler="dynesty",
    sampler_kwargs={"Nlive": Nlive, "walks": 60},
    outdir=outdir,
    label=label,
)

result = runner.result

# output comparisons
comparisons_two_harmonics(label, outdir, priors, cred=0.9)

# plot results
fig = result.plot_corner(save=False, parameters=list(priors.keys()), color="b")
fig = corner.corner(
    postsamples,
    fig=fig,  # assumed continuation: overlay the lalinference samples on the bilby plot
)
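
The overlaid figure can then be written to disk for inspection; a sketch using matplotlib's standard savefig (the output filename is hypothetical):

fig.savefig("{0}_comparison_corner.png".format(label), dpi=150)  # hypothetical filename
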
Example 3
postsamples = np.zeros((lp, len(priors)))  # lp: number of posterior samples
for i, p in enumerate(priors.keys()):
    postsamples[:, i] = post[p.upper()]

# get evidence
hdf = h5py.File(outpost, "r")
a = hdf["lalinference"]["lalinference_nest"]
evsig = a.attrs["log_evidence"]
evnoise = a.attrs["log_noise_evidence"]
hdf.close()

# run bilby via the pe interface
runner = pe(
    data_file=hetfiles,
    par_file=parfile,
    prior=priors,
    detector=detectors,
    outdir=outdir,
    label=label,
)

result = runner.result

# evaluate the likelihood on a grid
gridpoints = 35
grid_size = dict()
for p in priors.keys():
    grid_size[p] = np.linspace(np.min(result.posterior[p]),
                               np.max(result.posterior[p]), gridpoints)

grunner = pe(
    data_file=hetfiles,
    par_file=parfile,
    prior=priors,
    detector=detectors,
    grid=True,
    grid_kwargs={"grid_size": grid_size},  # assumed continuation, following Example 4
    outdir=outdir,
    label=label,
)
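
With both runs in hand, the gridded and sampled evidences can be compared. A sketch, assuming grunner exposes a bilby Grid via .grid in the same way run.grid is used in Example 4:

grid = grunner.grid
print("sampler ln evidence: {0:.3f}".format(result.log_evidence))
print("grid ln evidence: {0:.3f}".format(grid.ln_evidence))  # bilby Grid attribute
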
Example 4
with open(parfile, "w") as fp:
    fp.write(parcontent)

# use pe to create the data and sample on a grid
detector = "H1"  # the detector to use
times = np.linspace(1000000000.0, 1000086340.0, 1440)  # one day of 60 s spaced GPS times
asd = 1e-24  # simulated noise amplitude spectral density

# set prior on h0
priors = dict()
priors["h0"] = Uniform(0.0, 1e-24, "h0")
h0s = np.linspace(0.0, 1e-24, 500)  # h0 values to evaluate at

run = pe(
    detector=detector,
    fake_times=times,
    par_file=parfile,
    inj_par=parfile,
    fake_asd=asd,
    grid=True,
    grid_kwargs={"grid_size": {
        "h0": h0s
    }},
    prior=priors,
)

pl.plot(h0s, np.exp(run.grid.ln_posterior - np.max(run.grid.ln_posterior)),
        "b")
pl.axvline(run.hetdata["H1"][0].par["H0"])
pl.show()
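
# A follow-up sketch (not in the original snippet): a 90% credible upper limit
# on h0 from the gridded posterior, via a cumulative trapezoidal integral;
# h0_90 is a hypothetical variable name.
post = np.exp(run.grid.ln_posterior - np.max(run.grid.ln_posterior))
post /= np.trapz(post, h0s)  # normalise the posterior over the uniform h0 grid
cdf = np.cumsum(post) * (h0s[1] - h0s[0])  # approximate CDF on the grid
h0_90 = np.interp(0.9, cdf, h0s)
print("90% upper limit on h0:", h0_90)
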
# get evidence
hdf = h5py.File(outpost, "r")
a = hdf["lalinference"]["lalinference_nest"]
evsig = a.attrs["log_evidence"]
evnoise = a.attrs["log_noise_evidence"]
hdf.close()

# run bilby via the pe interface
runner = pe(
    data_file=hetfiles,
    par_file=parfile,
    prior=priors,
    detector=detectors,
    sampler="dynesty",
    sampler_kwargs={
        "Nlive": Nlive,
        "walks": 40,
        "use_ratio": True
    },
    outdir=outdir,
    label=label,
    numba=True,
)

result = runner.result

# evaluate the likelihood on a grid
gridpoints = 35
grid_size = dict()
for p in priors.keys():
    grid_size[p] = np.linspace(np.min(result.posterior[p]),
                               np.max(result.posterior[p]), gridpoints)