Example #1
    def run(self, kwargs_run):
        """
        run the Dynesty nested sampler

        see https://dynesty.readthedocs.io for content of kwargs_run

        :param kwargs_run: kwargs directly passed to DynamicNestedSampler.run_nested
        :return: samples, means, logZ, logZ_err, logL, results
        """
        print("prior type :", self.prior_type)
        print("parameter names :", self.param_names)

        self._sampler.run_nested(**kwargs_run)

        results = self._sampler.results
        samples_w = results.samples  # weighted samples
        logL = results.logl
        logZ = results.logz
        logZ_err = results.logzerr

        # Compute weighted mean and covariance.
        weights = np.exp(results.logwt - logZ[-1])  # normalized weights
        if np.sum(weights) != 1.:
            # TODO: clearly this is not optimal...
            # weights should by definition be normalized, but it appears that
            # for a very small number of live points (typically in test
            # routines) this is not *quite* the case (up to 6 decimals)
            weights = weights / np.sum(weights)

        means, covs = dyfunc.mean_and_cov(samples_w, weights)

        # Resample weighted samples to get equally weighted (aka unweighted) samples
        samples = dyfunc.resample_equal(samples_w, weights)

        return samples, means, logZ, logZ_err, logL, results
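
For reference, the post-processing idiom shared by essentially all of the examples on this page boils down to a few lines. A minimal sketch, assuming `sampler` is any finished dynesty sampler (the attribute names follow the `dynesty.results.Results` API):

import numpy as np
from dynesty import utils as dyfunc

results = sampler.results                                     # finished run (assumed)
weights = np.exp(results.logwt - results.logz[-1])            # posterior importance weights
weights /= np.sum(weights)                                    # guard against round-off (see the TODO above)
mean, cov = dyfunc.mean_and_cov(results.samples, weights)     # weighted posterior moments
samples_eq = dyfunc.resample_equal(results.samples, weights)  # equally weighted draws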
Example #2
File: runtests.py  Project: wym109/dynesty
def check_results(results, lz_tol, m_tol, c_tol, sig=5):
    pos = results.samples
    wts = np.exp(results.logwt - results.logz[-1])
    mean, cov = dyfunc.mean_and_cov(pos, wts)
    logz, logzerr = results.logz[-1], results.logzerr[-1]
    mean_check = np.all(np.abs(mean) < sig * m_tol)
    # C and lnz_truth are module-level truth values defined in runtests.py
    cov_check = np.all(np.abs(cov - C) < sig * c_tol)
    logz_check = abs(lnz_truth - logz) < sig * lz_tol
    sys.stderr.write('\nlogz: {} | mean: {} | cov: {}\n'.format(
        logz_check, mean_check, cov_check))
Example #3
def get_params_fit(results, return_sample=False):
    """ Get median, mean, covairance (and samples) from fitting results """
    samples = results.samples                                 # samples
    weights = np.exp(results.logwt - results.logz[-1])        # normalized weights 
    pmean, pcov = dyfunc.mean_and_cov(samples, weights)       # weighted mean and covariance
    samples_eq = dyfunc.resample_equal(samples, weights)      # resample weighted samples
    pmed = np.median(samples_eq, axis=0)
    
    if return_sample:
        return pmed, pmean, pcov, samples_eq
    else:
        return pmed, pmean, pcov
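
A hypothetical call, assuming `results` is a finished dynesty run:

import numpy as np

pmed, pmean, pcov, samples_eq = get_params_fit(results, return_sample=True)
perr = np.sqrt(np.diag(pcov))  # 1-sigma uncertainties from the weighted covariance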
Example #4
def test_gaussian():
    logz_tol = 1
    sampler = dynesty.NestedSampler(loglikelihood_gau,
                                    prior_transform_gau,
                                    ndim_gau,
                                    nlive=nlive)
    sampler.run_nested(print_progress=printing)

    # add samples
    # check continuation behavior
    sampler.run_nested(dlogz=0.1, print_progress=printing)

    # get errors
    nerr = 2
    for i in range(nerr):
        sampler.reset()
        sampler.run_nested(print_progress=False)
        results = sampler.results
        pos = results.samples
        wts = np.exp(results.logwt - results.logz[-1])
        mean, cov = dyfunc.mean_and_cov(pos, wts)
        logz = results.logz[-1]
        assert (np.abs(logz - logz_truth_gau) < logz_tol)
    # check summary
    res = sampler.results
    res.summary()

    # check plots
    dyplot.runplot(sampler.results)
    plt.close()
    dyplot.traceplot(sampler.results)
    plt.close()
    dyplot.cornerpoints(sampler.results)
    plt.close()
    dyplot.cornerplot(sampler.results)
    plt.close()
    dyplot.boundplot(sampler.results,
                     dims=(0, 1),
                     it=3000,
                     prior_transform=prior_transform_gau,
                     show_live=True,
                     span=[(-10, 10), (-10, 10)])
    plt.close()
    dyplot.cornerbound(sampler.results,
                       it=3500,
                       prior_transform=prior_transform_gau,
                       show_live=True,
                       span=[(-10, 10), (-10, 10)])
    plt.close()
Example #5
def bootstrap_tol(results):
    """ Compute the uncertainty of means/covs by doing bootstrapping """
    n = len(results.logz)
    niter = 50
    pos = results.samples
    wts = np.exp(results.logwt - results.logz[-1])
    means = []
    covs = []

    for i in range(niter):
        xid = np.random.randint(n, size=n)
        mean, cov = dyfunc.mean_and_cov(pos[xid], wts[xid])
        means.append(mean)
        covs.append(cov)
    return np.std(means, axis=0), np.std(covs, axis=0)
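
Usage sketch, assuming `results` comes from a completed run. The bootstrapped weight subsets are not renormalized, which is fine here because `dyfunc.mean_and_cov` computes weighted moments that do not require the weights to sum to one:

mean_err, cov_err = bootstrap_tol(results)  # bootstrap std. dev. of the weighted mean / covariance
print('mean uncertainty:', mean_err)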
Example #6
def calm2l_dynesty(in_res, alfvar, use_keys, outname, pool):
    print('creating results file:\n {0}results_dynesty/res_dynesty_{1}.hdf5'.
          format(ALFPY_HOME, outname))
    f1 = h5py.File(
        "{0}results_dynesty/res_dynesty_{1}.hdf5".format(ALFPY_HOME, outname),
        "w")
    for ikey in [
            'samples', 'logwt', 'logl', 'logvol', 'logz', 'logzerr',
            'information'
    ]:
        dset = f1.create_dataset(ikey,
                                 dtype=np.float16,
                                 data=np.array(getattr(in_res, ikey),
                                               dtype=np.float16))

    samples, weights = in_res.samples, np.exp(in_res.logwt - in_res.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    samples = dyfunc.resample_equal(in_res.samples, weights)

    dset = f1.create_dataset('samples_eq',
                             dtype=np.float16,
                             data=np.array(samples, dtype=np.float16))
    dset = f1.create_dataset('mean',
                             dtype=np.float16,
                             data=np.array(mean, dtype=np.float16))
    dset = f1.create_dataset('cov',
                             dtype=np.float16,
                             data=np.array(cov, dtype=np.float16))
    #dset = f1.create_dataset('use_keys', dtype=dt, data=use_keys)

    nspec = samples.shape[0]
    select_ind = np.random.choice(np.arange(nspec), size=1000)
    samples = np.copy(samples[select_ind, :])

    tstart = time.time()

    pwork = partial(worker_m2l, alfvar, use_keys)
    ml_res = pool.map(pwork, samples)

    ndur = time.time() - tstart
    print('\npost-processing dynesty results: {:.2f} minutes'.format(ndur / 60.))

    dset = f1.create_dataset('m2l',
                             dtype=np.float16,
                             data=np.array(ml_res, dtype=np.float16))
    f1.close()
Example #7
def check_results(results,
                  mean_truth,
                  cov_truth,
                  logz_truth,
                  mean_tol,
                  cov_tol,
                  logz_tol,
                  sig=5):
    """ Check if means and covariances match match expectations
    within the tolerances

    """
    pos = results.samples
    wts = np.exp(results.logwt - results.logz[-1])
    mean, cov = dyfunc.mean_and_cov(pos, wts)
    logz = results.logz[-1]
    npt.assert_array_less(np.abs(mean - mean_truth), sig * mean_tol)
    npt.assert_array_less(np.abs(cov - cov_truth), sig * cov_tol)
    npt.assert_array_less(np.abs(logz_truth - logz), sig * logz_tol)
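
A hedged usage sketch; the truth values below are hypothetical placeholders for whatever the analytic test problem provides:

import numpy as np

ndim = 3
check_results(sampler.results,
              mean_truth=np.zeros(ndim),  # hypothetical analytic mean
              cov_truth=np.eye(ndim),     # hypothetical analytic covariance
              logz_truth=-9.0,            # hypothetical analytic evidence
              mean_tol=0.05,
              cov_tol=0.05,
              logz_tol=0.2)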
Example #8
def calm2l_dynesty(in_res, alfvar, use_keys, outname, ncpu=1):
    print('creating results file:\n {0}results/res_dynesty_{1}.hdf5'.format(
        ALFPY_HOME, outname))
    f1 = h5py.File(
        "{0}results/res_dynesty_{1}.hdf5".format(ALFPY_HOME, outname), "w")
    for ikey in [
            'samples', 'logwt', 'logl', 'logvol', 'logz', 'logzerr',
            'information'
    ]:
        dset = f1.create_dataset(ikey,
                                 dtype=np.float16,
                                 data=getattr(in_res, ikey))

    samples, weights = in_res.samples, np.exp(in_res.logwt - in_res.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    samples = dyfunc.resample_equal(in_res.samples, weights)

    dset = f1.create_dataset('samples_eq', dtype=np.float16, data=samples)
    dset = f1.create_dataset('mean', dtype=np.float16, data=mean)
    dset = f1.create_dataset('cov', dtype=np.float16, data=cov)
    dset = f1.create_dataset('use_keys', data=use_keys)

    nspec = samples.shape[0]
    select_ind = np.random.choice(np.arange(nspec), size=1000)
    samples = np.copy(samples[select_ind, :])

    tstart = time.time()
    pwork = partial(worker_m2l, alfvar, use_keys)
    nspec = samples.shape[0]
    m2l_res = Parallel(n_jobs=ncpu)(delayed(pwork)(samples[i])
                                    for i in tqdm(range(nspec)))
    #m2l_res = executor.map(pwork, [ispec for ispec in samples])
    #pool = multiprocessing.Pool(ncpu)
    #m2l_res = pool.map(pwork, [ispec for ispec in samples])
    #pool.close()
    #pool.join()
    ndur = time.time() - tstart
    print('\npost-processing dynesty results: {:.2f} minutes'.format(ndur / 60.))

    dset = f1.create_dataset('m2l', dtype=np.float16, data=m2l_res)
    f1.close()
Example #9
def test_gaussian():
    sig = 5
    rstate = get_rstate()
    g = Gaussian()
    sampler = dynesty.NestedSampler(g.loglikelihood,
                                    g.prior_transform,
                                    g.ndim,
                                    nlive=nlive,
                                    rstate=rstate)
    sampler.run_nested(print_progress=printing)
    # check that jitter/resample work
    # for not dynamic sampler
    dyfunc.jitter_run(sampler.results, rstate=rstate)
    dyfunc.resample_run(sampler.results, rstate=rstate)

    # add samples
    # check continuation behavior
    sampler.run_nested(dlogz=0.1, print_progress=printing)

    # get errors
    nerr = 3
    result_list = []
    for i in range(nerr):
        sampler.reset()
        sampler.run_nested(print_progress=False)
        results = sampler.results
        result_list.append(results)
        pos = results.samples
        wts = np.exp(results.logwt - results.logz[-1])
        mean, cov = dyfunc.mean_and_cov(pos, wts)
        logz = results.logz[-1]
        assert (np.abs(logz - g.logz_truth) < sig * results.logzerr[-1])
    res_comb = dyfunc.merge_runs(result_list)
    assert (np.abs(res_comb.logz[-1] - g.logz_truth) <
            sig * results.logzerr[-1])
    # check summary
    res = sampler.results
    res.summary()
Example #10
def test_gaussian():
    logz_tol = 1
    sampler = dynesty.NestedSampler(loglikelihood_gau,
                                    prior_transform_gau,
                                    ntotdim,
                                    nlive=nlive,
                                    ncdim=ndim_gau)
    sampler.run_nested(print_progress=printing)
    # check that jitter/resample/simulate_run work
    # for not dynamic sampler
    dyfunc.jitter_run(sampler.results)
    dyfunc.resample_run(sampler.results)
    dyfunc.simulate_run(sampler.results)

    # add samples
    # check continuation behavior
    sampler.run_nested(dlogz=0.1, print_progress=printing)

    # get errors
    nerr = 2
    result_list = []
    for i in range(nerr):
        sampler.reset()
        sampler.run_nested(print_progress=False)
        results = sampler.results
        result_list.append(results)
        pos = results.samples
        wts = np.exp(results.logwt - results.logz[-1])
        mean, cov = dyfunc.mean_and_cov(pos, wts)
        logz = results.logz[-1]
        assert (np.abs(logz - logz_truth_gau) < logz_tol)
    res_comb = dyfunc.merge_runs(result_list)
    assert (np.abs(res_comb.logz[-1] - logz_truth_gau) < logz_tol)
    # check summary
    res = sampler.results
    res.summary()
Example #11
def run(
    path=".",
    clobber=False,
    plot_all=False,
    plot_data=True,
    plot_latitude_pdf=True,
    plot_trace=False,
    plot_corner=False,
    plot_corner_transformed=True,
    plot_inclination_pdf=True,
    ncols=10,
    clip=False,
    fail_on_kwargs_mismatch=True,
    **kwargs,
):
    if not os.path.exists(path):
        os.makedirs(path)

    # Save the kwargs
    if clobber or not os.path.exists(os.path.join(path, "kwargs.json")):
        json.dump(
            defaults.update_with_defaults(**kwargs),
            open(os.path.join(path, "kwargs.json"), "w"),
        )
    else:
        input_kwargs = defaults.update_with_defaults(**kwargs)
        saved_kwargs = json.load(open(os.path.join(path, "kwargs.json"), "r"))
        if not (input_kwargs == saved_kwargs):
            if fail_on_kwargs_mismatch:
                raise ValueError(
                    "Input kwargs don't match saved kwargs for this run.")
        kwargs = saved_kwargs

    # Generate
    if clobber or not os.path.exists(os.path.join(path, "data.npz")):
        data = generate(**kwargs)
        np.savez(os.path.join(path, "data.npz"), **data)
    else:
        data = np.load(os.path.join(path, "data.npz"))

    # Plot the data
    if plot_all or plot_data:
        if clobber or not os.path.exists(os.path.join(path, "data.pdf")):
            fig = plot.plot_data(data, ncols=ncols, clip=clip, **kwargs)
            fig.savefig(os.path.join(path, "data.pdf"),
                        bbox_inches="tight",
                        dpi=300)

    # Sample
    if clobber or not os.path.exists(os.path.join(path, "results.pkl")):
        results = sample(data, **kwargs)
        pickle.dump(results, open(os.path.join(path, "results.pkl"), "wb"))
    else:
        results = pickle.load(open(os.path.join(path, "results.pkl"), "rb"))

    # Compute inclination pdf
    compute_inclination_pdf = defaults.update_with_defaults(
        **kwargs)["sample"]["compute_inclination_pdf"]
    if compute_inclination_pdf:
        if clobber or not os.path.exists(os.path.join(path,
                                                      "inclinations.npz")):
            inc_results = inclination.compute_inclination_pdf(
                data, results, **kwargs)
            np.savez(os.path.join(path, "inclinations.npz"), **inc_results)
        else:
            inc_results = np.load(os.path.join(path, "inclinations.npz"))
    else:
        inc_results = None

    # Transform latitude params and store posterior mean and cov
    if clobber or not os.path.exists(os.path.join(path, "mean_and_cov.npz")):
        samples = np.array(results.samples)
        samples[:, 1], samples[:, 2] = beta2gauss(samples[:, 1], samples[:, 2])
        try:
            weights = np.exp(results["logwt"] - results["logz"][-1])
        except KeyError:
            weights = results["weights"]
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        np.savez(os.path.join(path, "mean_and_cov.npz"), mean=mean, cov=cov)
    else:
        mean_and_cov = np.load(os.path.join(path, "mean_and_cov.npz"))
        mean = mean_and_cov["mean"]
        cov = mean_and_cov["cov"]

    # Plot the results
    if plot_all or plot_latitude_pdf:
        if clobber or not os.path.exists(os.path.join(path, "latitude.pdf")):
            fig = plot.plot_latitude_pdf(results, **kwargs)
            fig.savefig(os.path.join(path, "latitude.pdf"),
                        bbox_inches="tight")

    if plot_all or plot_trace:
        if clobber or not os.path.exists(os.path.join(path, "trace.pdf")):
            fig = plot.plot_trace(results, **kwargs)
            fig.savefig(os.path.join(path, "trace.pdf"), bbox_inches="tight")

    if plot_all or plot_corner:
        if clobber or not os.path.exists(os.path.join(path, "corner.pdf")):
            fig = plot.plot_corner(results, transform_beta=False, **kwargs)
            fig.savefig(os.path.join(path, "corner.pdf"), bbox_inches="tight")

    if plot_all or plot_corner_transformed:
        if clobber or not os.path.exists(
                os.path.join(path, "corner_transformed.pdf")):
            fig = plot.plot_corner(results, transform_beta=True, **kwargs)
            fig.savefig(
                os.path.join(path, "corner_transformed.pdf"),
                bbox_inches="tight",
            )

    if (plot_all or plot_inclination_pdf) and (compute_inclination_pdf):
        if clobber or not os.path.exists(os.path.join(path,
                                                      "inclination.pdf")):
            fig = plot.plot_inclination_pdf(data, inc_results, **kwargs)
            fig.savefig(os.path.join(path, "inclination.pdf"),
                        bbox_inches="tight")
Example #12
def plot_ecc_corner():
    """
    Plot the eccentricity distributions and alpha, beta posteriors
    all together for disk1, disk2 (plane2), and non-disk.
    """

    # Load up the huge set of i, omega samples for all
    # the stars.
    pdfs_weights_file = work_dir + 'all_pdfs_weights.pkl'
    tmp = pop_fitter.load_pdfs_weights_pickle(pdfs_weights_file)

    pdf_dict = tmp[0]
    wgt_dict = tmp[1]
    d1_dict = tmp[2]
    d2_dict = tmp[3]
    grp_dict = tmp[4]

    # Load up the membership probabilities. 
    membership_file = work_dir + 'membership_probs.fits'
    prob_mem = Table.read(membership_file)

    # Define the groups and load up the sampler results for
    # each one. 
    groups = ['d2', 'nd', 'd1']
    # groups = ['d2', 'nd']
    ecc_results = {}

    # Load up the sampler results for all three groups.
    for group in groups:
        sampler_results_file = work_dir + 'dnest_ecc_' + group + '.pkl'
        _in = open(sampler_results_file, 'rb')
        ecc_results[group] = pickle.load(_in)
        _in.close()

    # Make a plot of the alpha-beta corners with all three shown.
    plt.close(1)
    foo = plt.subplots(2, 2, figsize=(6, 6), num=1)

    colors = {'d1': 'red', 'd2': 'blue', 'nd': 'grey'}

    for group in groups:
        print('')
        print('*** Results for ', group)
        results = ecc_results[group]
        samples = results.samples
        weights = np.exp(results.logwt - results.logz[-1])
        samples_equal = dyfunc.resample_equal(samples, weights)

        try:
            results.nlive
        except AttributeError:
            results.nlive = results.batch_nlive[-1]
    
        results.summary()
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        errors = np.diagonal(cov)**0.5

        param_names = ['alpha', 'beta']

        for ii in range(len(mean)):
            print('{0:5s} = {1:5.2f} +/- {2:5.2f}'.format(param_names[ii],
                                                          mean[ii], errors[ii]))
                                                      
        dyplot.cornerplot(results, fig=foo, labels=[r'$\alpha$', r'$\beta$'],
                          color=colors[group], quantiles=None)

    # Make a legend.
    plt.text(0.65, 0.75, 'disk1', color=colors['d1'],
                 fontsize=18, fontweight='bold',
                 transform=plt.gcf().transFigure)
    plt.text(0.65, 0.7, 'plane2', color=colors['d2'],
                 fontsize=18, fontweight='bold',
                 transform=plt.gcf().transFigure)
    plt.text(0.65, 0.65, 'other', color=colors['nd'],
                 fontsize=18, fontweight='bold',
                 transform=plt.gcf().transFigure)
    
    print('Existing axis limits: ')
    print('axes: 0, 0')
    print('    X = ', foo[1][0, 0].get_xlim())
    print('    Y = ', foo[1][0, 0].get_ylim())
    print('axes: 0, 1')
    print('    X = ', foo[1][0, 1].get_xlim())
    print('    Y = ', foo[1][0, 1].get_ylim())
    print('axes: 1, 0')
    print('    X = ', foo[1][1, 0].get_xlim())
    print('    Y = ', foo[1][1, 0].get_ylim())
    print('axes: 1, 1')
    print('    X = ', foo[1][1, 1].get_xlim())
    print('    Y = ', foo[1][1, 1].get_ylim())

    axgrid = foo[1]
    
    axgrid[1, 0].set_xlim(5e-1, 50)
    axgrid[0, 0].set_xlim(5e-1, 50)

    axgrid[1, 0].set_ylim(5e-1, 50)
    axgrid[1, 1].set_xlim(5e-1, 50)
    
    axgrid[0, 0].set_ylim(0, 0.013)
    axgrid[1, 1].set_ylim(0, 0.013)

    axgrid[0, 0].set_xscale('log')
    axgrid[1, 1].set_xscale('log')
    axgrid[1, 0].set_xscale('log')
    axgrid[1, 0].set_yscale('log')

    plt.savefig(work_dir + 'fig_ecc_corner.png')

    return
Example #13
def pyorbit_dynesty(config_in, input_datasets=None, return_output=None):

    output_directory = './' + config_in['output'] + '/dynesty/'

    mc = ModelContainerDynesty()
    pars_input(config_in, mc, input_datasets)

    if mc.nested_sampling_parameters['shutdown_jitter']:
        for dataset in mc.dataset_dict.values():  # .itervalues() is Python 2 only
            dataset.shutdown_jitter()

    mc.model_setup()
    mc.create_variables_bounds()
    mc.initialize_logchi2()

    mc.create_starting_point()

    results_analysis.results_resumen(mc, None, skip_theta=True)

    mc.output_directory = output_directory

    print()
    print('Reference Time Tref: ', mc.Tref)
    print()
    print('*************************************************************')
    print()

    import dynesty

    # "Standard" nested sampling.
    sampler = dynesty.NestedSampler(mc.dynesty_call, mc.dynesty_priors,
                                    mc.ndim)
    sampler.run_nested()
    results = sampler.results

    # "Dynamic" nested sampling.
    dsampler = dynesty.DynamicNestedSampler(mc.dynesty_call, mc.dynesty_priors,
                                            mc.ndim)
    dsampler.run_nested()
    dresults = dsampler.results

    from dynesty import plotting as dyplot

    # Plot a summary of the run.
    rfig, raxes = dyplot.runplot(results)

    # Plot traces and 1-D marginalized posteriors.
    tfig, taxes = dyplot.traceplot(results)

    # Plot the 2-D marginalized posteriors.
    cfig, caxes = dyplot.cornerplot(results)

    from dynesty import utils as dyfunc

    # Extract sampling results.
    samples = results.samples  # samples
    weights = np.exp(results.logwt - results.logz[-1])  # normalized weights

    # Compute 5%-95% quantiles (per parameter).
    quantiles = [dyfunc.quantile(samps, [0.05, 0.95], weights=weights)
                 for samps in samples.T]

    # Compute weighted mean and covariance.
    mean, cov = dyfunc.mean_and_cov(samples, weights)

    # Resample weighted samples.
    samples_equal = dyfunc.resample_equal(samples, weights)

    # Generate a new set of results with statistical+sampling uncertainties.
    results_sim = dyfunc.simulate_run(results)
    """ A dummy file is created to let the cpulimit script to proceed with the next step"""
    nested_sampling_create_dummy_file(mc)

    if return_output:
        return mc
    else:
        return
Example #14
# Plot the 2-D marginalized posteriors.
cfig, caxes = dyplot.cornerplot(results)


# we can post-process results

# Extract sampling results.
samples = results.samples  # samples
weights = np.exp(results.logwt - results.logz[-1])  # normalized weights

# Compute 10%-90% quantiles.
quantiles = [dyfunc.quantile(samps, [0.1, 0.9], weights=weights)
             for samps in samples.T]

# Compute weighted mean and covariance.
mean, cov = dyfunc.mean_and_cov(samples, weights)

# Resample weighted samples.
samples_equal = dyfunc.resample_equal(samples, weights)

# Generate a new set of results with statistical+sampling uncertainties.
results_sim = dyfunc.simulate_run(results)

cfig, caxes = dyplot.cornerplot(results_sim)


# print citations specific to the configuration I am using
print(sampler.citations)


# this time, linear regression
Example #15
def rebuild_current_distribution(
    fields: np.ndarray,
    ics: np.ndarray,
    jj_size: float,
    current_pattern: List[Union[Literal["f"], str]],
    sweep_invariants: List[Union[Literal["offset"], Literal["field_to_k"]]] = [
        "offset",
        "field_to_k",
    ],
    precision: float = 100,
    n_points: int = 2 ** 10 + 1,
) -> dict:
    """Rebuild a current distribution from a Fraunhofer pattern.

    This assumes uniform field focusing, since allowing a non-uniform focusing
    would lead to a much larger space to explore.

    Parameters
    ----------
    fields : np.ndarray
        Out of plane field for which the critical current was measured.
    ics : np.ndarray
        Critical current of the junction.
    jj_size : float
        Size of the junction.
    current_pattern : List[Union[Literal["f"], str]]
        Describe in how many pieces to use to represent the junction. If the
        input arrays are more than 1D, "f" means that value is the same across
        all outer dimension, "v" means that the slice takes different value
        for all outer dimension (ie. one value per sweep).
    sweep_invariants : Tuple[Union[Literal["offset", "field_to_k"]]]
        Indicate what quantities are invariants across sweep for more the 1D
        inputs.
    precision : float, optional
        pass
    n_points : int, optional

    Returns
    -------
    dict


    """
    # Get the offset and estimated amplitude used in the prior.
    # We do not use the estimated current and phase distribution, to give
    # more space to the algorithm.
    # NOTE: the number of sites is taken from the current pattern; the
    # original snippet referenced undefined names (field, fraunhofer,
    # site_number).
    offsets, first_node_locs, _, _, _ = guess_current_distribution(
        fields, ics, len(current_pattern), jj_size
    )
    # Gives a Fraunhofer pattern at the first node for v[1] = 1
    field_to_ks = 2 * np.pi / jj_size / np.abs(first_node_locs - offsets)

    # Determine the dimensionality of the problem based on the invariants and
    # the shape of the inputs.
    if len(sweep_invariants) > 2:
        raise ValueError("There are at most 2 invariants.")
    if any(k for k in sweep_invariants if k not in ("offset", "field_to_k")):
        raise ValueError(
            f"Invalid invariant specified {sweep_invariants}, "
            "valid values are 'offset', 'field_to_k'."
        )

    shape = fields.shape[:-1]
    shape_product = prod(shape) if shape else 0

    if shape_product == 0 and any(p.startswith("v") for p in current_pattern):
        raise ValueError(
            "Found variable current in the distribution but the measurements are 1D."
        )

    dim = len(sweep_invariants) + current_pattern.count("f")
    dim += shape_product * (current_pattern.count("v") + 2 - len(sweep_invariants))

    # Pre-compute slices to access elements in the prior and log-like
    offset_access = slice(
        0, 1 if "offset" in sweep_invariants else (shape_product or 1)
    )
    field_to_k_access = slice(
        offset_access.stop,
        offset_access.stop + 1
        if "field_to_k" in sweep_invariants
        else (shape_product or 1),
    )

    stop = field_to_k_access.stop
    current_density_accesses = []
    for p in current_pattern:
        if p == "f":
            current_density_accesses.append(slice(stop, stop + 1))
            stop += 1
        elif p == "v":
            current_density_accesses.append(slice(stop, stop + (shape_product or 1)))
            stop += current_density_accesses[-1].stop
        else:
            raise ValueError(
                f"Valid values in current_pattern are 'f' and 'v', found '{p}'"
            )

    def prior(u):
        """Map the sampled in 0-1 to the relevant values range.

        For all values we consider the values in the prior to be the log of the
        values we are looking for.

        """
        v = np.empty_like(u)
        v[offset_access] = 4 * u[offset_access] - 2
        v[field_to_k_access] = 4 * u[field_to_k_access] - 2

        # For all the amplitude we map the value between 0 and -X since the
        # amplitude of a single segment cannot be larger than the total current
        # X is determined based on the number of segments
        ampl = -np.log10(len(current_pattern))
        for sl in current_density_accesses:
            v[sl] = u[sl] * ampl

        return v

    def loglike(v):
        """Compute the distance to the data"""

        # We turn invariant inputs into their variant form (from one
        # occurrence in v to n repetitions in w) to ease a systematic
        # writing of the loglike.
        stop = step = shape_product or 1

        w = np.empty((2 + len(current_pattern)) * step)
        w[0:step] = v[offset_access]               # broadcast invariant offsets
        w_off = w[0:step]
        w[step : 2 * step] = v[field_to_k_access]  # broadcast invariant field_to_k
        w_f2k = w[step : 2 * step]
        stop = 2 * step
        for sl in current_density_accesses:
            w[stop : stop + step] = v[sl]
            stop += step

        # Pack the current distribution so that each line corresponds to
        # different conditions
        c_density = w[2 * step :].reshape((len(current_pattern), -1)).T

        err = np.empty_like(ics)

        it = np.nditer((offsets, first_node_locs, field_to_ks), ["multi_index"])
        for i, (off, fnloc, f2k) in enumerate(it):
            # Compute the offset
            f_off = off + np.sign(w_off[i]) * 10 ** -abs(w_off[i]) * fnloc

            # Compute the Fraunhofer pattern
            f = produce_fraunhofer_fast(
                (fields[it.multi_index] - f_off),
                f2k * 10 ** w_f2k[i],
                jj_size,
                c_density[i],
                2 ** 10 + 1,
            )

            # Compute and store the error
            # NOTE: `amplitude` is assumed to be an estimated pattern
            # amplitude (e.g. one of the values discarded from
            # guess_current_distribution above).
            err[it.multi_index] = np.sum(
                (100 * (ics[it.multi_index] - f) / amplitude) ** 2
            )

        # NestedSampler expects a scalar log-likelihood
        return -np.sum(err)

    # XXX do that nasty part later
    sampler = NestedSampler(loglike, prior, dim)
    sampler.run_nested(dlogz=precision)
    res = sampler.results
    weights = np.exp(res.logwt - res.logz[-1])
    mu, cov = utils.mean_and_cov(res["samples"], weights)

    res["fraunhofer_params"] = {
        "offset": offset + np.sign(mu[0]) * 10 ** -abs(mu[0]) * first_node_loc,
        "field_to_k": 2 * np.pi / jj_size / abs(first_node_loc - offset) * 10 ** mu[1],
        "amplitude": amplitude * 10 ** mu[2],
        "current_distribution": np.array(
            [1 - np.sum(mu[3 : 3 + site_number - 1])]
            + list(mu[3 : 3 + site_number - 1])
        ),
        "phase_distribution": np.array(
            [0] + list(mu[3 + site_number - 1 : 3 + 2 * site_number - 2])
        ),
    }

    return res
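
For context, dynesty's prior-transform convention maps a point drawn uniformly from the unit cube onto the parameter space, which is what the `prior` closure above implements. A minimal standalone sketch of the `4 * u - 2` mapping used for the offset and field-to-k entries:

import numpy as np

def prior_transform(u):
    # Map u ~ Uniform[0, 1) to a uniform prior on [-2, 2), as in `prior` above.
    return 4.0 * np.asarray(u) - 2.0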
Example #16
def runMCMC(path, ndim, p, loglike, ptform, galname, **pdict):
    pdict = pdict['pdict']
    start = time.time()
    pdict['start'] = start

    if ndim == 8:
        nparams = '_8P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=0.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(res1,
                                     truths=np.array([
                                         pdict['vrot'][0], pdict['vrot'][1],
                                         pdict['vrot'][2], pdict['vrot'][3],
                                         pdict['vdisp'][0], pdict['vdisp'][1],
                                         pdict['vdisp'][2], pdict['vdisp'][3]
                                     ]),
                                     truth_color='black',
                                     show_titles=True,
                                     trace_cmap='viridis',
                                     connect=True,
                                     smooth=0.02,
                                     connect_highlight=range(8),
                                     labels=[
                                         r'$v_{rot,225}$', r'$v_{rot,450}$',
                                         r'$v_{rot,675}$', r'$v_{rot,900}$',
                                         r'$\sigma_{225}$', r'$\sigma_{450}$',
                                         r'$\sigma_{675}$', r'$\sigma_{900}$'
                                     ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()

        # initialize figure (missing in this branch; mirrors the 9P/10P branches)
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=5,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$', r'$\phi[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))
    elif ndim == 9:
        nparams = '_9P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=0.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()
        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    elif ndim == 10:
        nparams = '_10P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()

        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$', r'$\phi[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    # Save the model data
    samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    MaP = res1['samples'][res1['logl'].tolist().index(
        max(res1['logl'].tolist()))]
    quantiles = [
        dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
        for samps in samples.T
    ]

    pdict['sigmavrot'] = [(quantiles[0][0], quantiles[0][2]),
                          (quantiles[1][0], quantiles[1][2]),
                          (quantiles[2][0], quantiles[2][2]),
                          (quantiles[3][0], quantiles[3][2])]
    pdict['sigmavdisp'] = [(quantiles[4][0], quantiles[4][2]),
                           (quantiles[5][0], quantiles[5][2]),
                           (quantiles[6][0], quantiles[6][2]),
                           (quantiles[7][0], quantiles[7][2])]
    pdict['vrot'] = [
        quantiles[0][1], quantiles[1][1], quantiles[2][1], quantiles[3][1]
    ]
    pdict['vdisp'] = [
        quantiles[4][1], quantiles[5][1], quantiles[6][1], quantiles[7][1]
    ]

    if len(quantiles) == 9:
        pdict['inc'] = quantiles[8][1]
        pdict['sigmainc'] = [(quantiles[8][0], quantiles[8][2])]

    if len(quantiles) == 10:
        pdict['inc'] = quantiles[8][1]
        pdict['sigmainc'] = [(quantiles[8][0], quantiles[8][2])]
        pdict['phi'] = quantiles[9][1]
        pdict['sigmaphi'] = [(quantiles[9][0], quantiles[9][2])]

    # We don't need data entry, waste of space
    pdict['Data'] = None
    with open(path + '/params_model.json', 'w') as f:
        f.write(json.dumps(pdict, cls=NumpyEncoder))
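
For reference, the quantile unpacking above reduces to this per-parameter pattern (a sketch; `samples` and `weights` as computed in the example, dynesty utils imported as `dyfunc`):

lo, med, hi = dyfunc.quantile(samples[:, 0], [0.16, 0.5, 0.84], weights=weights)
err_minus, err_plus = med - lo, hi - med  # asymmetric 1-sigma interval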
Example #17
def plot_a_results(results, pdfs, pdf_weights, suffix, a_min, a_max):
    samples = results.samples
    weights = np.exp(results.logwt - results.logz[-1])
    samples_equal = dyfunc.resample_equal(samples, weights)

    # results.summary()
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    errors = np.diagonal(cov)**0.5

    maxL_index = results['logl'].argmax()
    maxL_params = samples[maxL_index]

    param_names = ['alpha1']  #, 'alpha2', 'log_a_break', 'amp']

    for ii in range(len(mean)):
        print('{0:5s} = {1:5.2f} +/- {2:5.2f}, maxL = {3:5.2f}'.format(
            param_names[ii], mean[ii], errors[ii], maxL_params[ii]))

    plt.close('all')

    # dyplot.runplot(results)
    # plt.savefig('dnest_a_run_' + suffix + '.png')

    dyplot.traceplot(results)
    plt.savefig('dnest_a_trace_' + suffix + '.png')

    dyplot.cornerplot(results)
    plt.savefig('dnest_a_corner_' + suffix + '.png')

    # Make a plot of the resulting distributions.
    # Note these bins have to match what we used to make the PDFs in the first place.
    a_bin = np.logspace(3, 8, 50)
    # a_bin = np.linspace(1e3, 1e6, 100)
    a_bin_mid = a_bin[:-1] + np.diff(a_bin) / 2  # bin midpoints

    alpha1 = mean[0]
    # alpha2 = mean[1]
    # a_break = 10**mean[2]

    # p_a = broken_powerlaw_trunc(a_bin_mid, alpha1, alpha2, a_break, a_min=a_min, a_max=a_max)
    p_a = powerlaw_trunc(a_bin_mid, alpha1, a_min=a_min, a_max=a_max)

    N_samp = 1000
    p_a_nk = np.zeros((len(a_bin_mid), N_samp), dtype=float)
    for ss in range(N_samp):
        # p_a_nk[:, ss] = broken_powerlaw_trunc(a_bin_mid,
        #                                           samples_equal[ss, 0],
        #                                           samples_equal[ss, 1],
        #                                           10**samples_equal[ss, 2],
        #                                           a_min=a_min, a_max=a_max)
        p_a_nk[:, ss] = powerlaw_trunc(a_bin_mid,
                                       samples_equal[ss, 0],
                                       a_min=a_min,
                                       a_max=a_max)

    fig, ax = plt.subplots(2, 1, sharex=True)
    plt.subplots_adjust(hspace=0)

    for ss in range(N_samp):
        ax[0].loglog(a_bin_mid, p_a_nk[:, ss], 'r-', linewidth=1, alpha=0.05)

    ax[0].loglog(a_bin_mid, p_a, 'r-', linewidth=5)

    # Plot the individual star PDFs
    a_bin_widths = np.diff(a_bin)

    for ss in range(pdfs.shape[0]):
        an, ab = np.histogram(pdfs[ss],
                              bins=a_bin,
                              weights=pdf_weights[ss],
                              density=False)
        an /= a_bin_widths
        ax[1].loglog(a_bin_mid, an, 'k-', linewidth=2, alpha=0.5)

    # Joint PDF:
    an, ab = np.histogram(pdfs.ravel(),
                          bins=a_bin,
                          weights=pdf_weights.ravel(),
                          density=False)
    an /= a_bin_widths
    ax[1].loglog(a_bin_mid, an, 'g-', linewidth=3)

    ax[1].set_xlabel('Semi-major Axis (AU)')
    ax[1].set_ylabel('PDF')
    ax[0].set_ylabel('PDF')

    plt.savefig('dnest_a_dist_' + suffix + '.png')

    return
Example #18
def plot_ecc_results(results, pdfs, pdf_weights, suffix):
    samples = results.samples
    weights = np.exp(results.logwt - results.logz[-1])
    samples_equal = dyfunc.resample_equal(samples, weights)

    # results.summary()
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    errors = np.diagonal(cov)**0.5

    maxL_index = results['logl'].argmax()
    maxL_params = samples[maxL_index]

    param_names = ['alpha', 'beta']
    labels = ['$\\alpha$', '$\\beta$']

    for ii in range(len(mean)):
        print('{0:5s} = {1:5.2f} +/- {2:5.2f}, maxL = {3:5.2f}'.format(
            param_names[ii], mean[ii], errors[ii], maxL_params[ii]))
    plt.close('all')

    dyplot.runplot(results)
    plt.savefig('dnest_ecc_run_' + suffix + '.png')

    dyplot.traceplot(results, labels=labels)
    plt.savefig('dnest_ecc_trace_' + suffix + '.png')

    dyplot.cornerplot(results, labels=labels)
    plt.savefig('dnest_ecc_corner_' + suffix + '.png')

    # Make a plot of the resulting distributions.
    # Note these bins have to match what we used to make the PDFs in the first place.
    e_bin = np.arange(0, 1, 0.01)

    # Calculate the "best-fit" PDF.
    # p_e = scipy.stats.beta.pdf(e_bin, mean[0], mean[1])
    p_e = scipy.stats.beta.pdf(e_bin, maxL_params[0], maxL_params[1])

    # Make samples drawn from the posteriors.
    N_samp = 1000
    p_e_nk = np.zeros((len(e_bin), N_samp), dtype=float)
    for ss in range(N_samp):
        p_e_nk[:, ss] = scipy.stats.beta.pdf(e_bin, samples_equal[ss][0],
                                             samples_equal[ss][1])

    fig, ax = plt.subplots(2, 1, sharex=True)
    plt.subplots_adjust(hspace=0)

    for ss in range(N_samp):
        ax[0].plot(e_bin, p_e_nk[:, ss], 'r-', linewidth=1, alpha=0.05)

    ax[0].plot(e_bin, p_e, 'r-', linewidth=5)

    # Plot the individual star PDFs
    e_bin_edges = np.append(e_bin, 1.0)
    e_bin_widths = np.diff(e_bin_edges)

    for ss in range(pdfs.shape[0]):

        # # instantiate and fit the KDE model
        # kde = KernelDensity(bandwidth=1e-2, kernel='gaussian')
        # kde.fit(pdfs[ss][:, None], sample_weight=pdf_weights[ss])

        # # score_samples returns the log of the probability density
        # e_bin_kde = np.arange(0, 1.0, 5e-3)
        # logprob = kde.score_samples(e_bin_kde[:, None])
        # prob = np.exp(logprob)
        # prob *= pdf_weights[ss].sum()

        # ax[1].plot(e_bin_kde, prob, 'k-', color='green', linewidth=2, alpha=0.5)

        en, eb = np.histogram(pdfs[ss],
                              bins=e_bin_edges,
                              weights=pdf_weights[ss],
                              density=False)
        en /= e_bin_widths
        ax[1].plot(e_bin + (e_bin_widths / 2.0),
                   en,
                   'k-',
                   linewidth=2,
                   alpha=0.5)

    ax[1].set_xlabel('Eccentricity')
    ax[1].set_ylabel('PDF')
    ax[0].set_ylabel('PDF')

    # ax[0].set_ylim(0, 5)
    ylim1 = ax[1].get_ylim()
    ax[1].set_ylim(0, ylim1[1])

    plt.savefig('dnest_ecc_dist_' + suffix + '.png')

    return
Example #19
def get_mean_params(res):
    samples, weights = res.samples, np.exp(res.logwt - res.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    return mean
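
Minimal usage sketch; `get_median_params` is a hypothetical companion shown for comparison:

import numpy as np
from dynesty import utils as dyfunc

theta_mean = get_mean_params(res)  # posterior-mean parameter vector

def get_median_params(res):
    # Hypothetical companion: per-parameter medians from equally weighted resamples.
    samples, weights = res.samples, np.exp(res.logwt - res.logz[-1])
    return np.median(dyfunc.resample_equal(samples, weights), axis=0)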
Example #20
def MCMC_diagnostic(path, ndim, p, loglike, ptform, galname, nlive, **pdict):
    pdict = pdict['pdict']
    start = time.time()
    pdict['start'] = start

    if ndim == 10:
        nparams = '_10P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=nlive,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': nlive,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        # Save nested data
        # obtain KL divergence
        klds = []
        for i in range(500):
            kld = dyfunc.kld_error(res1, error='simulate')
            klds.append(kld[-1])
        print(np.mean(klds))
        res1['KLval'] = np.mean(klds)
        with open(path + '/result_nested_P' + '{}'.format(nlive) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
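        # presumably the analytic log-evidence for flat priors of width
        # 2 * 30 in each of the 10 dimensions (hard-coded here)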
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()
        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots, taken at 1/8 through 6/8 of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        fg, ax = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),  # 91.8,98.3,8.88,6.5,60,60
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=None,
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/cornerplot_' + galname + nparams + '.png')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    # Save the model data
    samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    MaP = res1.samples[np.argmax(res1.logl)]  # maximum-likelihood sample
    quantiles = [
        dyfunc.quantile(samps, [0.025, 0.5, 0.975], weights=weights)
        for samps in samples.T
    ]
    print(quantiles)

    # Asymmetric "sigma" estimates: for each parameter, split the samples into
    # those below and those above the MaP value and take the standard deviation
    # of each side (0 if a side is empty).
    def split_std(col):
        below = samples[samples[:, col] < MaP[col], col]
        above = samples[samples[:, col] > MaP[col], col]
        return (np.std(below) if below.size else 0.0,
                np.std(above) if above.size else 0.0)

    pdict['sigmavrot'] = [split_std(col) for col in range(0, 4)]
    pdict['sigmavdisp'] = [split_std(col) for col in range(4, 8)]

    if len(MaP) == 8:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]

    if len(MaP) == 9:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]
        pdict['inc'] = MaP[8]
        pdict['sigmainc'] = [split_std(8)]

    if len(MaP) == 10:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]
        pdict['inc'] = MaP[8]
        pdict['phi'] = MaP[9]
        pdict['sigmainc'] = [split_std(8)]
        pdict['sigmaphi'] = [split_std(9)]

    # The raw data entry is not needed in the saved parameter file
    pdict['Data'] = None
    with open(path + '/params_model.json', 'w') as f:
        f.write(json.dumps(pdict, cls=NumpyEncoder))
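The KL-divergence loop near the top of this example estimates the statistical uncertainty of a single run by simulating many realizations of it. A condensed, standalone sketch of that step (the res Results object is assumed):

import numpy as np
from dynesty import utils as dyfunc

# kld_error simulates the sampling uncertainty of a run and returns the
# cumulative KL divergence per iteration; the final entry summarizes the run.
klds = [dyfunc.kld_error(res, error='simulate')[-1] for _ in range(500)]
print(np.mean(klds), np.std(klds))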
Example #21
File: runtests.py  Project: wym109/dynesty
sampler.run_nested(dlogz=0.1, print_progress=printing)

# get errors
# check resets and repeated runs
sys.stderr.write('\n\nDeriving Errors\n')
means, covs, logzs = [], [], []
nerr = 50
for i in range(nerr):
    if printing:
        sys.stderr.write('\r{}/{}'.format(i + 1, nerr))
    sampler.reset()
    sampler.run_nested(print_progress=False)
    results = sampler.results
    pos = results.samples
    wts = np.exp(results.logwt - results.logz[-1])
    mean, cov = dyfunc.mean_and_cov(pos, wts)
    logz = results.logz[-1]
    means.append(mean)
    covs.append(cov)
    logzs.append(logz)
if printing:
    sys.stderr.write('\n')
lz_tol = np.std(logzs)
m_tol = np.std(means, axis=0)
c_tol = np.std(covs, axis=0)
sys.stderr.write('logz_tol: {}\n'.format(lz_tol))
sys.stderr.write('mean_tol: {}\n'.format(m_tol))
sys.stderr.write('cov_tol: {}\n'.format(c_tol))

# check summary
sys.stderr.write('\nResults\n')
res = sampler.results
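res.summary()  # presumably the next step: print a formatted summary of the run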