Exemplo n.º 1
0
def read_in_result_list(args, results_filenames):
    """Load bilby results from *results_filenames* and return a ResultList.

    Files that fail JSON decoding are skipped silently. Results without a
    stored posterior are filtered out afterwards; if any are dropped, the
    list is truncated to the complete subset. With ``args.print`` set, the
    sorted result labels are also printed.
    """
    print("Reading in results ...")
    results = []
    for filename in tqdm.tqdm(results_filenames):
        try:
            results.append(read_in_result(filename))
        except json.decoder.JSONDecodeError:
            # corrupt or partially-written file: skip it
            pass
    print(f"Read in {len(results)} results from directory {args.directory}")

    print("Checking if results are complete")
    # keep only results that actually carry a posterior
    complete = [r for r in results if r._posterior is not None]
    if len(complete) < len(results):
        print(f"Results incomplete, truncating to {len(complete)}")
        results = complete
    else:
        print("Results complete")

    if args.print:
        print(
            f"List of result-labels: {sorted([res.label for res in results])}")
    return ResultList(results)
Exemplo n.º 2
0
    def test_results_odds(self):
        """Exercise ``results_odds`` with invalid inputs, single files,
        Result objects, per-source dictionaries, full result directories,
        and per-detector selections, checking return types throughout.
        """
        with pytest.raises(TypeError):
            # invalid results type
            results_odds(4.1)

        resfiles = find_results_files(self.resdir)

        # pass Result object
        for scale in ["log10", "ln"]:
            lo = results_odds(read_in_result(resfiles[self.pnames[0]]["H1"]),
                              scale=scale)
            assert isinstance(lo, float)

        # pass single file
        for scale in ["log10", "ln"]:
            lo = results_odds(resfiles[self.pnames[0]]["H1"], scale=scale)
            assert isinstance(lo, float)

        # pass invalid directory
        with pytest.raises(ValueError):
            results_odds(self.hetdir)

        # pass dictionary of files for single source
        losvn = results_odds(resfiles[self.pnames[0]], oddstype="svn")
        locvi = results_odds(resfiles[self.pnames[0]], oddstype="cvi")

        assert isinstance(losvn, float) and isinstance(locvi,
                                                       float) and losvn > locvi

        # pass without giving coherent multidetector result
        with pytest.raises(RuntimeError):
            results_odds(
                {det: resfiles[self.pnames[0]][det]
                 for det in self.dets[:2]})

        with pytest.raises(KeyError):
            rd = {det: resfiles[self.pnames[0]][det] for det in self.dets[:2]}
            rd.update({"H1L": resfiles[self.pnames[0]]["H1L1"]})
            results_odds(rd)

        # use all pulsars
        for oddstype in ["svn", "cvi"]:
            for scale in ["log10", "ln"]:
                lo = results_odds(self.resdir, oddstype=oddstype, scale=scale)
                assert isinstance(lo, dict)
                assert sorted(lo.keys()) == sorted(self.pnames)
                # generator expression: no intermediate list inside all()
                assert all(isinstance(v, float) for v in lo.values())

        # get results for one detector
        with pytest.raises(KeyError):
            results_odds(self.resdir, oddstype="svn", det="V1")

        lo = results_odds(self.resdir, oddstype="svn", det="H1")
        assert isinstance(lo, dict)
        assert sorted(lo.keys()) == sorted(self.pnames)
        assert all(isinstance(v, float) for v in lo.values())
Exemplo n.º 3
0
    def test_plot_inputs(self):
        """Check that the Plot class validates and ingests its inputs."""

        # no positional argument at all must fail
        with pytest.raises(TypeError):
            _ = Plot()

        # a positional argument of the wrong type must also fail
        with pytest.raises(TypeError):
            _ = Plot(1)

        # each supported single-result input should load as one result
        single_inputs = [
            self.lppen,
            read_in_result(self.cwinpy),
            Grid.read(self.grid),
        ]
        for single in single_inputs:
            assert len(Plot(single).results) == 1

        # a dictionary of mixed results without an explicit parameters
        # list should fail with an inconsistent-parameter ValueError
        with pytest.raises(ValueError):
            _ = Plot({
                "lppen": self.lppen,
                "CWInPy": read_in_result(self.cwinpy),
                "grid": Grid.read(self.grid),
            })

        # non-existent files should raise an IOError
        with pytest.raises(IOError):
            _ = Plot({"one": "no_file.hdf", "two": "blah.json"})

        plot = Plot(self.allresults, parameters=["h0", "psi"])
        assert len(plot.results) == len(self.allresults)

        # each stored result should keep its expected concrete type
        expected_types = [Result, Result, Grid]
        for label, restype in zip(self.allresults.keys(), expected_types):
            assert isinstance(plot.results[label], restype)
Exemplo n.º 4
0
def main():
    """Post-process a bilby result from the command line.

    Optionally reconstructs samples for marginalized parameters (loading
    one likelihood per trigger, from either bilby or parallel_bilby data
    dumps) and reweights the posterior, then saves the relabelled result
    to the current directory.
    """
    args, unknown_args = parse_args(sys.argv[1:], create_parser(__prog__))
    logger = logging.getLogger(__prog__)

    result = read_in_result(args.result)
    label = result.label

    # Test if the result was generated by parallel_bilby. A missing or
    # malformed meta_data entry just means it was not — catch only the
    # lookup errors instead of a bare except that hides real bugs.
    result_from_pbilby = False
    try:
        if result.meta_data["command_line_args"][
                "sampler"] == "parallel_bilby":
            result_from_pbilby = True
    except (KeyError, TypeError):
        pass

    if args.generate_samples_for_marginalized_parameters:
        # load one likelihood per trigger from the appropriate backend
        single_trigger_likelihoods = []
        if result_from_pbilby:
            for i in range(args.n_triggers):
                likelihood, _, _ = load_run_from_pbilby(
                    args.data_dump_files[i])
                single_trigger_likelihoods.append(likelihood)
        else:
            for i in range(args.n_triggers):
                likelihood, _, _ = load_run_from_bilby(
                    args.data_dump_files[i], args.trigger_ini_files[i])
                single_trigger_likelihoods.append(likelihood)

        generate_joint_posterior_samples_from_marginalized_likelihood(
            result, single_trigger_likelihoods, ncores=args.ncores)
        label += "_marginalized_parameter_reconstructed"

    if args.flat_in_component_masses:
        result = reweight_flat_in_component_masses(result)
        label += "_reweighted"

    if args.uniform_in_comoving_volume:
        result = reweight_uniform_in_comoving_volume(result)
        label += "_reweighted"

    # Save to file
    logger.info("Done. Saving to file")
    result.label = label
    result.save_to_file(outdir=".")
Exemplo n.º 5
0
def comparisons(label, outdir, grid, priors, cred=0.9):
    """
    Perform comparisons of the evidence, parameter values, confidence
    intervals, and Kolmogorov-Smirnov test between samples produced with
    lalapps_pulsar_parameter_estimation_nested and cwinpy.

    Results are written into ``{label}_compare.txt`` in *outdir* by
    filling the slots of the ``FILETEXT`` template in order.
    """

    lppenfile = os.path.join(outdir, "{}_post.hdf".format(label))

    # get posterior samples
    post = read_samples(lppenfile,
                        tablename=LALInferenceHDF5PosteriorSamplesDatasetName)

    # get evidences and the uncertainty on ln(evidence); open the HDF5
    # file once in a context manager instead of leaking four handles
    with h5py.File(lppenfile, "r") as hdf:
        nestattrs = hdf["lalinference"]["lalinference_nest"].attrs
        info = nestattrs["information_nats"]
        nlive = nestattrs["number_live_points"]
        evsig = nestattrs["log_evidence"]
        evnoise = nestattrs["log_noise_evidence"]
    everr = np.sqrt(info / nlive)  # the uncertainty on the evidence

    # read in cwinpy results
    result = read_in_result(outdir=outdir, label=label)

    # comparison file
    comparefile = os.path.join(outdir, "{}_compare.txt".format(label))

    # get grid-based evidence
    if grid is not None:
        grid_evidence = grid.log_evidence

    # set values to output; `values` holds the positional arguments for
    # FILETEXT and `idx` below walks through its slots
    values = 64 * [None]
    values[0:4] = evsig, evnoise, (evsig - evnoise), everr
    values[4:8] = (
        result.log_evidence,
        result.log_noise_evidence,
        result.log_bayes_factor,
        result.log_evidence_err,
    )
    if grid is not None:
        values[8] = "{0:.3f}".format(grid_evidence)
        values[9] = "{0:.3f}".format(grid_evidence - result.log_noise_evidence)
    else:
        values[8:10] = ("N/A", "N/A")  # no values supplied

    # output parameter means, standard deviations, and credible intervals
    idx = 10
    for method in ["lalapps", "cwinpy"]:
        values[idx + 9] = int(cred * 100)
        for p in priors.keys():
            samples = post[
                p.upper()] if method == "lalapps" else result.posterior[p]

            # convert iota to cos(iota)
            if p == "iota":
                samples = np.cos(samples)

            mean = samples.mean()
            std = samples.std()
            low, high = credible_interval(samples, ci=cred)
            if p == "h0":
                # h0 is reported as mantissa/exponent pairs
                exponent = int(np.floor(np.log10(mean)))
                values[idx] = mean / 10**exponent
                values[idx + 1] = std / 10**exponent
                values[idx + 2] = exponent
                values[idx + 10] = low / 10**exponent
                values[idx + 11] = high / 10**exponent
                values[idx + 12] = exponent
                idx += 3
            else:
                values[idx] = mean
                values[idx + 1] = std
                values[idx + 10] = low
                values[idx + 11] = high
                idx += 2
        idx += 10

    # output parameter maximum a-posteriori points
    maxidx = (result.posterior["log_likelihood"] +
              result.posterior["log_prior"]).idxmax()
    maxidxlppen = (post["logL"] + post["logPrior"]).argmax()
    for method in ["lalapps", "cwinpy"]:
        for p in priors.keys():
            maxpval = (post[p.upper()][maxidxlppen]
                       if method == "lalapps" else result.posterior[p][maxidx])
            if p == "h0":
                exponent = int(np.floor(np.log10(maxpval)))
                values[idx] = maxpval / 10**exponent
                values[idx + 1] = exponent
                idx += 2
            else:
                values[idx] = maxpval
                idx += 1
        if result.use_ratio:
            # convert likelihood ratio back to likelihood
            values[idx] = (post["logL"][maxidxlppen] if method == "lalapps"
                           else (result.posterior["log_likelihood"][maxidx] +
                                 result.log_noise_evidence))
        else:
            values[idx] = (post["logL"][maxidxlppen] if method == "lalapps"
                           else result.posterior["log_likelihood"][maxidx])
        idx += 1

    # calculate the Kolmogorov-Smirnov test for each 1d marginalised
    # distribution, and the Jensen-Shannon divergence, from the two codes.
    # Output the combined p-value of the KS test statistic over all
    # parameters, and the maximum Jensen-Shannon divergence over all
    # parameters. (A previous placeholder assignment of np.inf to this
    # slot was dead code and has been removed.)
    pvalues = []
    jsvalues = []
    for p in priors.keys():
        _, pvalue = ks_2samp(post[p.upper()], result.posterior[p])
        pvalues.append(pvalue)

        # calculate J-S divergence over a shared set of histogram bins
        bins = np.linspace(
            np.min([np.min(post[p.upper()]),
                    np.min(result.posterior[p])]),
            np.max([np.max(post[p.upper()]),
                    np.max(result.posterior[p])]),
            100,
        )

        hp, _ = np.histogram(post[p.upper()], bins=bins, density=True)
        hq, _ = np.histogram(result.posterior[p], bins=bins, density=True)
        jsvalues.append(jensenshannon(hp, hq)**2)

    values[idx] = combine_pvalues(pvalues)[1]
    idx += 1
    values[idx] = np.max(jsvalues)

    # record the code versions used for this comparison
    values[idx + 1] = cwinpy.__version__
    values[idx + 2] = bilby.__version__

    with open(comparefile, "w") as fp:
        fp.write(FILETEXT.format(*values))
Exemplo n.º 6
0
def main():
    """Post-process a (possibly joint/hanabi) bilby result.

    Optionally reconstructs marginalized-parameter samples, component-mass
    parameters and SNRs, reweights the posterior to a new prior or
    standard reweightings, and saves the relabelled result to file.
    """
    args, unknown_args = parse_args(sys.argv[1:], create_parser(__prog__))
    logger = logging.getLogger(__prog__)

    result = read_in_result(args.result)
    label = result.label

    # Test if the result was generated by parallel_bilby. A missing or
    # malformed meta_data entry just means it was not — catch only the
    # lookup errors instead of a bare except that hides real bugs.
    result_from_pbilby = False
    try:
        if result.meta_data["command_line_args"][
                "sampler"] == "parallel_bilby":
            result_from_pbilby = True
    except (KeyError, TypeError):
        pass

    if args.generate_samples_for_marginalized_parameters:
        single_trigger_likelihoods = reconstruct_likelihoods(
            n_triggers=args.n_triggers,
            trigger_ini_files=args.trigger_ini_files,
            data_dump_files=args.data_dump_files,
            result_from_pbilby=result_from_pbilby,
        )

        if not args.not_from_hanabi:
            # joint (multi-trigger) reconstruction
            result = generate_joint_posterior_samples_from_marginalized_likelihood(
                result, single_trigger_likelihoods, ncores=args.ncores)
        else:
            # non-hanabi results must be single-trigger
            if args.n_triggers != 1:
                raise ValueError("Does not understand input")
            result = generate_posterior_samples_from_marginalized_likelihood(
                result, single_trigger_likelihoods[0], ncores=args.ncores)
        label += "_marginalized_parameter_reconstructed"

    if args.generate_component_mass_parameters:
        # Edit file **in-place**
        result = generate_component_mass_parameters(result)

    if args.generate_snrs:
        single_trigger_likelihoods = reconstruct_likelihoods(
            n_triggers=args.n_triggers,
            trigger_ini_files=args.trigger_ini_files,
            data_dump_files=args.data_dump_files,
            result_from_pbilby=result_from_pbilby,
        )

        # Edit file **in-place**
        if not args.not_from_hanabi:
            result = generate_joint_snrs(result,
                                         single_trigger_likelihoods,
                                         ncores=args.ncores)
        else:
            if args.n_triggers != 1:
                raise ValueError("Does not understand input")
            result = generate_snrs(result,
                                   single_trigger_likelihoods[0],
                                   ncores=args.ncores)

    if args.reweight_to_prior is not None:
        # reweight the posterior to a prior loaded from the given file
        result = reweight_to_prior(
            result,
            bilby.core.prior.PriorDict(filename=args.reweight_to_prior))
        label += "_reweighted"

    if args.flat_in_component_masses:
        result = reweight_flat_in_component_masses(result)
        label += "_reweighted"

    if args.uniform_in_comoving_volume:
        result = reweight_uniform_in_comoving_volume(result)
        label += "_reweighted"

    # Save to file
    logger.info("Done. Saving to file")
    result.label = label
    result.save_to_file(outdir=".", filename=args.output_filename)
Exemplo n.º 7
0
# priors for the model parameters: 's' uniform on [0, 1]; 'd' a truncated
# Gaussian (mean 3.4, sigma 0.5, bounded to [0, 100]) — presumably a
# distance; confirm units against the eta_model definition
priors['s'] = bilby.core.prior.Uniform(0, 1, 's')
priors['d'] = bilby.core.prior.TruncatedGaussian(3.4, 0.5, 0, 100, 'd')
if anisotropy:
    # anisotropic case: sample the angle 'psi' (periodic over 0-180 deg)
    # and the velocity component 'vism_psi'
    priors['psi'] = bilby.core.prior.Uniform(0, 180, 'psi', boundary='periodic')
    priors['vism_psi'] = bilby.core.prior.Gaussian(0, 100, 'vism_psi')

    # define so don't have to change eta_model args
    priors['vism_ra'] = bilby.core.prior.DeltaFunction(0, 'vism_ra')
    priors['vism_dec'] = bilby.core.prior.DeltaFunction(0, 'vism_dec')
else:
    # isotropic case: sample the RA/dec velocity components instead
    priors['vism_ra'] = bilby.core.prior.Gaussian(0, 100, 'vism_ra')
    priors['vism_dec'] = bilby.core.prior.Gaussian(0, 100, 'vism_dec')

    # define so don't have to change eta_model args
    priors['psi'] = bilby.core.prior.DeltaFunction(0, 'psi')
    priors['vism_psi'] = bilby.core.prior.DeltaFunction(0, 'vism_psi')

# run the dynesty sampler if no previous results file was supplied;
# otherwise load the saved result and plot it against the data
if results_file is None:
    results = bilby.core.sampler.run_sampler(
        likelihood, priors=priors, sampler='dynesty', label='dynesty',
        npoints=npoints, verbose=False, resume=False, 
        outdir='outdir_J1603')
else:        
    results = result.read_in_result(filename=results_file, 
                                    outdir=None, label=None, 
                                    extension='json', gzip=False)
    results.plot_with_data(arc_curvature, xdata, eta, ndraws=0, 
                           xlabel='Orbital phase', ylabel='Arc curvature')
results.plot_corner()
print(results)
Exemplo n.º 8
0
    tm_marg=False,
    red_var=False,  # don't estimate red noise to speed things up
    white_vary=True,  # estimate white noise (this defaults to True anyway)
)

# run using enterprise_warp to access bilby_mcmc
priors = bilby_warp.get_bilby_prior_dict(pta)
parameters = dict.fromkeys(priors.keys())
likelihood = bilby_warp.PTABilbyLikelihood(pta, parameters)

outdir = "test/"
label = "test_bilby"
bilby.run_sampler(likelihood=likelihood,
                  priors=priors,
                  outdir=outdir,
                  label=label,
                  sampler="bilby_mcmc",
                  nsamples=1000)

# load the saved result back in for post-processing
res = read_in_result(os.path.join(outdir, f"{label}_result.json"))

# convert F0 and F1 into true ranges by undoing the timing-model
# parameter scaling (val + err * scaled_sample)
for i, p in enumerate(["F0", "F1"]):
    res.posterior[f"J0030+0451_timing model_tmparams_{i}"] = psr.t2pulsar[
        p].val + psr.t2pulsar[p].err * res.posterior[
            f"J0030+0451_timing model_tmparams_{i}"]

fig = res.plot_corner(
    filename="test_bilby.png",
    # raw string: "\d" is an invalid escape sequence in a normal string
    # literal (SyntaxWarning in modern Python, slated to become an error)
    labels=["EFAC", "ECORR", "EQUAD", "$f_0$ (Hz)", r"$\dot{f}$ (Hz/s)"])
Exemplo n.º 9
0
    def test_optimal_snr(self):
        """Exercise ``optimal_snr`` with invalid inputs, single files,
        Result/HeterodynedData objects, multi-detector combinations, full
        directories and per-detector selections, checking return types.
        """
        with pytest.raises(TypeError):
            # invalid input type for results directory
            optimal_snr(9.8, self.hetdir)

        with pytest.raises(TypeError):
            # invalid input type for heterodyned data directory
            optimal_snr(self.resdir, 1.6)

        with pytest.raises(ValueError):
            # invalid "which" value
            optimal_snr(self.resdir, self.hetdir, which="blah")

        resfiles = find_results_files(self.resdir)
        hetfiles = find_heterodyned_files(self.hetdir)

        # get single detector, single source SNR
        snr = optimal_snr(resfiles[self.pnames[0]]["H1"],
                          hetfiles[self.pnames[0]]["H1"])
        assert isinstance(snr, float)

        # use a dictionary instead
        snr = optimal_snr(resfiles[self.pnames[0]]["H1"],
                          {"H1": hetfiles[self.pnames[0]]["H1"]})
        assert isinstance(snr, float)

        # check using likelihood gives same value as posterior (a flat prior was used to produce the files)
        snrl = optimal_snr(
            resfiles[self.pnames[0]]["H1"],
            hetfiles[self.pnames[0]]["H1"],
            which="likelihood",
        )
        assert snr == snrl

        # pass remove outliers flag
        snr = optimal_snr(
            resfiles[self.pnames[0]]["H1"],
            {"H1": hetfiles[self.pnames[0]]["H1"]},
            remove_outliers=True,
        )
        assert isinstance(snr, float)

        # pass result as Result object
        snr = optimal_snr(
            read_in_result(resfiles[self.pnames[0]]["H1"]),
            hetfiles[self.pnames[0]]["H1"],
        )
        assert isinstance(snr, float) and snr == snrl

        # pass heterodyned data as HeterodynedData object
        snr = optimal_snr(
            resfiles[self.pnames[0]]["H1"],
            HeterodynedData.read(hetfiles[self.pnames[0]]["H1"]),
        )
        assert isinstance(snr, float)

        # get single joint multi-detector result
        snr = optimal_snr(resfiles[self.pnames[0]]["H1L1"],
                          hetfiles[self.pnames[0]])
        assert isinstance(snr, float)

        # do the same, but with MultiHeterodynedData object
        snr = optimal_snr(
            resfiles[self.pnames[0]]["H1L1"],
            MultiHeterodynedData(hetfiles[self.pnames[0]]),
        )
        assert isinstance(snr, float)

        # get results for all pulsars and all detectors combination
        snr = optimal_snr(self.resdir, self.hetdir)
        assert isinstance(snr, dict)
        assert sorted(snr.keys()) == sorted(self.pnames)
        for k in snr:
            assert sorted(snr[k].keys()) == sorted(self.dets)
            # generator expression: no intermediate list inside all()
            assert all(isinstance(v, float) for v in snr[k].values())

        # pass in par files directory
        snr = optimal_snr(self.resdir, self.hetdir, par=self.pardir)
        assert isinstance(snr, dict)
        assert sorted(snr.keys()) == sorted(self.pnames)
        for k in snr:
            assert sorted(snr[k].keys()) == sorted(self.dets)
            assert all(isinstance(v, float) for v in snr[k].values())

        # get results for a single detector
        snr = optimal_snr(self.resdir, self.hetdir, det="H1")
        assert isinstance(snr, dict)
        assert sorted(snr.keys()) == sorted(self.pnames)
        assert all(isinstance(v, float) for v in snr.values())