Code example #1
File: ttv_system.py  Project: astroshrey/ttvnest
    def posterior_summary(self, filename=None, equal_samples=False, ndig=6):

        results = self.results
        samples = results.samples
        nplanets = self.nplanets
        labels = self.fit_param_names
        if not equal_samples:
            weights = np.exp(results.logwt - results.logz[-1])
            quants = [
                dyfunc.quantile(samples[:, i], [0.025, 0.5, 0.975],
                                weights=weights)
                for i in range(samples.shape[1])
            ]
        else:
            quants = [
                dyfunc.quantile(samples[:, i], [0.025, 0.5, 0.975])
                for i in range(samples.shape[1])
            ]

        if filename is not None:
            f = open(filename, 'w')
        else:
            f = None

        print("Summary (middle 95 percentile): ", file=f)
        for quant, label in zip(quants, labels):
            qlo, qmid, qhi = quant
            qup = qhi - qmid
            qdown = qmid - qlo
            print(label + ': $' + str(round(qmid, ndig)) + '^{+' +
                  str(round(qup, ndig)) + '}_{-' + str(round(qdown, ndig)) +
                  '}$',
                  file=f)
        if f is not None:
            f.close()
        return filename
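
For reference, the weighted quantiles returned by dyfunc.quantile can be reproduced with plain NumPy. The sketch below is only an illustration of the underlying idea (sort by value, build the cumulative-weight CDF, interpolate); the exact interpolation convention inside dynesty may differ slightly, and weighted_quantile is a hypothetical helper name, not part of ttvnest.

import numpy as np

def weighted_quantile(x, q, weights):
    # Sort the samples, build the normalized cumulative-weight CDF,
    # and interpolate the requested quantile levels.
    order = np.argsort(x)
    x_sorted = np.asarray(x)[order]
    w_sorted = np.asarray(weights)[order]
    cdf = np.cumsum(w_sorted) / np.sum(w_sorted)
    return np.interp(q, cdf, x_sorted)

rng = np.random.default_rng(0)
x = rng.normal(size=5000)           # stand-in for one posterior column
logwt = rng.normal(size=5000)       # stand-in for results.logwt
w = np.exp(logwt - logwt.max())     # positive importance weights
print(weighted_quantile(x, [0.025, 0.5, 0.975], w))
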
Code example #2
File: plotting.py  Project: arm61/model_select
    def write_latex_percentiles(self):
        """
        Write LaTeX snippets for the values from the best model.
        """
        file_h5 = h5py.File('output/model^{}.h5'.format(self.best_file), 'r')
        best_labels = self.latex_p[np.argsort(self.logz)[-1]]
        samples, weights = file_h5['samples'], np.exp(
            file_h5['logwt'] - file_h5['logz'][-1], )
        percentiles = np.zeros((samples.shape[1], 3))
        for i in range(samples.shape[1]):
            percentiles[i] = utils.quantile(
                samples[:, i],
                np.array([0.025, 0.5, 0.975]),
                weights,
            )
            file_latex = open(
                'paper/{}_range.txt'.format(
                    best_labels[1:-1].split('$/$')[i], ),
                'w',
            )
            file_latex.write(
                r'${:.1f}'.format(percentiles[i, 1], ) + r'^{' +
                r'+{:.1f}'.format(percentiles[i, 2] - percentiles[i, 1], ) +
                r'}_{' +
                r'-{:.1f}'.format(percentiles[i, 1] - percentiles[i, 0]) +
                r'}$', )
            file_latex.close()
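
An illustrative alternative (not from the arm61/model_select project) to the string assembly above: the same $m^{+u}_{-d}$ LaTeX form built with a single f-string. The helper name latex_range is hypothetical.

def latex_range(lo, mid, hi, ndig=1):
    # Format a weighted median with its upper/lower offsets as a LaTeX snippet.
    return (f'${mid:.{ndig}f}'
            f'^{{+{hi - mid:.{ndig}f}}}'
            f'_{{-{mid - lo:.{ndig}f}}}$')

print(latex_range(2.1, 2.5, 3.0))  # -> $2.5^{+0.5}_{-0.4}$
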
Code example #3
File: utils.py  Project: tundeakins/Planet_tools
def dynesty_results(res, q=0.5):
    """Return the weighted quantile(s) q of each parameter column in a
    dynesty results object (numpy assumed imported as np at module level)."""
    from dynesty import utils as dyfunc

    samples, weights = res.samples, np.exp(res.logwt - res.logz[-1])

    return [
        dyfunc.quantile(samples[:, i], q, weights)[0]
        for i in range(samples.shape[1])
    ]
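
A hypothetical usage sketch for dynesty_results: the res argument below is a stand-in built with types.SimpleNamespace that mimics the three attributes the function reads (samples, logwt, logz). A real call would pass an actual dynesty results object, and dynesty itself must be installed since the function imports dyfunc internally.

import numpy as np
from types import SimpleNamespace

rng = np.random.default_rng(1)
nsamp, ndim = 2000, 3
logwt = rng.normal(size=nsamp)
res = SimpleNamespace(
    samples=rng.normal(size=(nsamp, ndim)),  # one column per parameter
    logwt=logwt,                             # unnormalized log-weights
    logz=np.array([0.0, logwt.max()]),       # only logz[-1] is used above
)

print(dynesty_results(res, q=0.5))  # weighted median of each column
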
Code example #4
File: plotting.py  Project: arm61/model_select
    def plot_refl_best(self):
        """
        Plot reflectometry when the best evidence.
        """
        file_h5 = h5py.File('output/model^{}.h5'.format(self.best_file), 'r')
        samples, weights = file_h5['samples'], np.exp(
            file_h5['logwt'] - file_h5['logz'][-1], )
        percentiles = np.zeros((samples.shape[1], 3))
        for i in range(samples.shape[1]):
            percentiles[i] = utils.quantile(
                samples[:, i],
                np.array([0.025, 0.5, 0.975]),
                weights,
            )

        variables = {}
        for i, var in enumerate(self.best_file.split('_')):
            variables[var] = i

        from model import refl

        axes = plt.subplots(figsize=(10, 15))[1]
        gobj = refl(
            percentiles[:, 1],
            CON_LIST,
            variables,
            False,
        )
        for i, obj in enumerate(gobj.objectives):
            axes.errorbar(
                obj.data.x,
                obj.data.y * obj.data.x**4 * 10**i,
                obj.data.y_err * obj.data.x**4 * 10**i,
                ls='',
                zorder=10,
            )
            axes.plot(
                obj.data.x,
                obj.model(obj.data.x) * obj.data.x**4 * 10**i,
                'k',
            )
        axes.set_yscale('log')
        axes.set_xlabel(r'$q$/Å$^{-1}$')
        axes.set_ylabel(r'$R(q)q^4$')
        plt.savefig('paper/refl.pdf')
        plt.close()
Code example #5
def pyorbit_dynesty(config_in, input_datasets=None, return_output=None):

    output_directory = './' + config_in['output'] + '/dynesty/'

    mc = ModelContainerDynesty()
    pars_input(config_in, mc, input_datasets)

    if mc.nested_sampling_parameters['shutdown_jitter']:
        for dataset in mc.dataset_dict.values():
            dataset.shutdown_jitter()

    mc.model_setup()
    mc.create_variables_bounds()
    mc.initialize_logchi2()

    mc.create_starting_point()

    results_analysis.results_resumen(mc, None, skip_theta=True)

    mc.output_directory = output_directory

    print()
    print('Reference Time Tref: ', mc.Tref)
    print()
    print('*************************************************************')
    print()

    import dynesty

    # "Standard" nested sampling.
    sampler = dynesty.NestedSampler(mc.dynesty_call, mc.dynesty_priors,
                                    mc.ndim)
    sampler.run_nested()
    results = sampler.results

    # "Dynamic" nested sampling.
    dsampler = dynesty.DynamicNestedSampler(mc.dynesty_call, mc.dynesty_priors,
                                            mc.ndim)
    dsampler.run_nested()
    dresults = dsampler.results

    from dynesty import plotting as dyplot

    # Plot a summary of the run.
    rfig, raxes = dyplot.runplot(results)

    # Plot traces and 1-D marginalized posteriors.
    tfig, taxes = dyplot.traceplot(results)

    # Plot the 2-D marginalized posteriors.
    cfig, caxes = dyplot.cornerplot(results)

    from dynesty import utils as dyfunc

    # Extract sampling results.
    samples = results.samples  # samples
    weights = np.exp(results.logwt - results.logz[-1])  # normalized weights

    # Compute 5%-95% quantiles for each parameter.
    quantiles = [dyfunc.quantile(samps, [0.05, 0.95], weights=weights)
                 for samps in samples.T]

    # Compute weighted mean and covariance.
    mean, cov = dyfunc.mean_and_cov(samples, weights)

    # Resample weighted samples.
    samples_equal = dyfunc.resample_equal(samples, weights)

    # Generate a new set of results with statistical+sampling uncertainties.
    results_sim = dyfunc.simulate_run(results)

    # A dummy file is created to let the cpulimit script proceed to the next step.
    nested_sampling_create_dummy_file(mc)

    if return_output:
        return mc
    else:
        return
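
One point worth spelling out from the snippet above: resample_equal turns the weighted nested-sampling draws into an equal-weight posterior sample, so the importance-weighted mean of samples should match the plain mean of samples_equal up to Monte Carlo noise. An illustrative check, assuming the variables from the function above are in scope:

weighted_mean = np.average(samples, axis=0, weights=weights)
equal_mean = samples_equal.mean(axis=0)
print(weighted_mean - equal_mean)  # should be close to zero, up to resampling noise
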
Code example #6
        # fig.savefig('/test/my_first_cornerplot.png')


'''Extract relevant results from the analysis'''

from dynesty.utils import quantile

# extract the quantile data
full_qauntiles = []
for results in full_results:
    # extract samples and weights
    samples = results['samples']
    weights = np.exp(results['logwt'] - results['logz'][-1])
    print('Sample shape', samples.shape)

    quantiles = [quantile(x_i, q=[0.025, 0.5, 0.975], weights=weights) for x_i in samples.transpose()]
    full_qauntiles.append(quantiles)


ch4_quantiles = []
for results in ch4_results:
    # extract samples and weights
    samples = results['samples']
    weights = np.exp(results['logwt'] - results['logz'][-1])
    quantiles = [quantile(x_i, q=[0.025, 0.5, 0.975], weights=weights) for x_i in samples.transpose()]
    ch4_quantiles.append(quantiles)


'''Analyze the results'''

# Extract the evidence

Code example #7
        fig.suptitle('Red lines are true values', fontsize=14)
        # fig.savefig('/test/my_first_cornerplot.png')

'''Extract relevant results from the analysis'''

from dynesty.utils import quantile

# extract the quantile data
full_qauntiles = []
for results in nitrogen_results:
    # extract samples and weights
    samples = results['samples']
    weights = np.exp(results['logwt'] - results['logz'][-1])
    print('Sample shape', samples.shape)

    quantiles = [
        quantile(x_i, q=[0.025, 0.5, 0.975], weights=weights)
        for x_i in samples.transpose()
    ]
    full_qauntiles.append(quantiles)

h2och4_quantiles = []
for results in h2och4_results:
    # extract samples and weights
    samples = results['samples']
    weights = np.exp(results['logwt'] - results['logz'][-1])
    quantiles = [
        quantile(x_i, q=[0.025, 0.5, 0.975], weights=weights)
        for x_i in samples.transpose()
    ]
    h2och4_quantiles.append(quantiles)
'''Analyze the results'''
Code example #8
File: INoDS_model.py  Project: bansallab/INoDS-model
def summarize_sampler(sampler,
                      G_raw,
                      true_value,
                      output_filename,
                      nparam=None,
                      corner_plot=True):
    r""" Summarize the results of the sampler"""

    summary_type = "parameter_estimate"
    dres = sampler.results
    tf = open(output_filename + "_parameter_summary.txt", "w+")

    samples = dres.samples  #samples
    weights = np.exp(dres['logwt'] - dres['logz'][-1])  # normalized weights
    parameter_estimate = []
    for num in range(nparam):  # for each parameter
        CI = dyfunc.quantile(dres['samples'][:, num], [0.025, 0.5, 0.975],
                             weights=weights)
        parameter_estimate.append(CI[1])
        if num == 0:
            print(
                "The median estimate and 95% credible interval for beta is " +
                str(round(CI[1], 5)) + " [" + str(round(CI[0], 5)) + "," +
                str(round(CI[2], 5)) + "]")
            tf.write(
                "The median estimate and 95% credible interval for beta is " +
                str(round(CI[1], 5)) + " [" + str(round(CI[0], 5)) + "," +
                str(round(CI[2], 5)) + "]\n")
        elif num == 1:
            print(
                "The median estimate and 95% credible interval for epsilon is "
                + str(round(CI[1], 5)) + " [" + str(round(CI[0], 5)) + "," +
                str(round(CI[2], 5)) + "]")
            tf.write(
                "The median estimate and 95% credible interval for epsilon is "
                + str(round(CI[1], 3)) + " [" + str(round(CI[0], 5)) + "," +
                str(round(CI[2], 5)) + "]\n")
        else:
            print(
                "Printing median and 95% credible interval for the rest of the unknown parameters"
            )
            print(
                str(round(CI[1], 3)) + " [" + str(round(CI[0], 3)) + "," +
                str(round(CI[2], 3)) + "]")
            tf.write(
                "median and 95% credible interval for the rest of the unknown parameter #"
                + str(num) + "\n")
            tf.write(
                str(round(CI[1], 3)) + " [" + str(round(CI[0], 3)) + "," +
                str(round(CI[2], 3)) + "]\n")

    dlogZdynesty = dres.logz[-1]  # value of logZ
    dlogZerrdynesty = dres.logzerr[
        -1]  # estimate of the statistical uncertainty on logZ

    # output log marginal likelihood
    tf.write("Log marginalized evidence of the network hypothesis is = " +
             str(round(dlogZdynesty, 3)) + "+/- " +
             str(round(dlogZerrdynesty, 3)) + "\n")
    print('Log marginalised evidence (using dynamic sampler) is {} +/- {}'.
          format(round(dlogZdynesty, 3), round(dlogZerrdynesty, 3)))

    tf.close()

    if corner_plot:
        dpostsamples = resample_equal(samples, weights)

        fig = corner.corner(dpostsamples,
                            labels=[r"$beta$", r"$epsilon$"],
                            quantiles=[0.16, 0.5, 0.84],
                            truths=true_value,
                            truth_color="red",
                            hist_kwargs={'density': True})

        fig.savefig(output_filename + "_" + summary_type + "_posterior.png")

    return parameter_estimate
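
A compact, hypothetical alternative (not part of INoDS-model) to the repeated string concatenation above, formatting one credible-interval triple from dyfunc.quantile as "median [low, high]":

def format_ci(ci, ndig=5):
    # ci is the (low, median, high) triple returned by dyfunc.quantile.
    lo, med, hi = ci
    return f"{med:.{ndig}f} [{lo:.{ndig}f}, {hi:.{ndig}f}]"

# e.g. tf.write("beta: " + format_ci(CI) + "\n")
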
Code example #9
def MCMC_diagnostic(path, ndim, p, loglike, ptform, galname, nlive, **pdict):
    pdict = pdict['pdict']
    start = time.time()
    pdict['start'] = start

    if ndim == 10:
        nparams = '_10P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=nlive,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': nlive,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        # Save nested data
        # obtain KL divergence
        klds = []
        for i in range(500):
            kld = dyfunc.kld_error(res1, error='simulate')
            klds.append(kld[-1])
        print(np.mean(klds))
        res1['KLval'] = np.mean(klds)
        with open(path + '/result_nested_P' + '{}'.format(nlive) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()
        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        fg, ax = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),  # 91.8,98.3,8.88,6.5,60,60
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=None,
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/cornerplot_' + galname + nparams + '.png')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    # Save the model data
    samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    MaP = res1['samples'][res1['logl'].tolist().index(
        max(res1['logl'].tolist()))]
    quantiles = [
        dyfunc.quantile(samps, [0.025, 0.5, 0.975], weights=weights)
        for samps in samples.T
    ]
    print(quantiles)

    # vrotsigma
    sigmavrot1_l = [i for i in samples[:, 0] if (i - MaP[0]) < 0]
    sigmavrot1_r = [i for i in samples[:, 0] if (i - MaP[0]) > 0]
    sigmavrot2_l = [i for i in samples[:, 1] if (i - MaP[1]) < 0]
    sigmavrot2_r = [i for i in samples[:, 1] if (i - MaP[1]) > 0]
    sigmavrot3_l = [i for i in samples[:, 2] if (i - MaP[2]) < 0]
    sigmavrot3_r = [i for i in samples[:, 2] if (i - MaP[2]) > 0]
    sigmavrot4_l = [i for i in samples[:, 3] if (i - MaP[3]) < 0]
    sigmavrot4_r = [i for i in samples[:, 3] if (i - MaP[3]) > 0]

    if len(sigmavrot1_l) == 0: sigmavrot1_l.append(0)
    if len(sigmavrot1_r) == 0: sigmavrot1_r.append(0)
    if len(sigmavrot2_l) == 0: sigmavrot2_l.append(0)
    if len(sigmavrot2_r) == 0: sigmavrot2_r.append(0)
    if len(sigmavrot3_l) == 0: sigmavrot3_l.append(0)
    if len(sigmavrot3_r) == 0: sigmavrot3_r.append(0)
    if len(sigmavrot4_l) == 0: sigmavrot4_l.append(0)
    if len(sigmavrot4_r) == 0: sigmavrot4_r.append(0)

    # vdispsigma
    sigmavdisp1_l = [i for i in samples[:, 4] if (i - MaP[4]) < 0]
    sigmavdisp1_r = [i for i in samples[:, 4] if (i - MaP[4]) > 0]
    sigmavdisp2_l = [i for i in samples[:, 5] if (i - MaP[5]) < 0]
    sigmavdisp2_r = [i for i in samples[:, 5] if (i - MaP[5]) > 0]
    sigmavdisp3_l = [i for i in samples[:, 6] if (i - MaP[6]) < 0]
    sigmavdisp3_r = [i for i in samples[:, 6] if (i - MaP[6]) > 0]
    sigmavdisp4_l = [i for i in samples[:, 7] if (i - MaP[7]) < 0]
    sigmavdisp4_r = [i for i in samples[:, 7] if (i - MaP[7]) > 0]

    if len(sigmavdisp1_l) == 0: sigmavdisp1_l.append(0)
    if len(sigmavdisp1_r) == 0: sigmavdisp1_r.append(0)
    if len(sigmavdisp2_l) == 0: sigmavdisp2_l.append(0)
    if len(sigmavdisp2_r) == 0: sigmavdisp2_r.append(0)
    if len(sigmavdisp3_l) == 0: sigmavdisp3_l.append(0)
    if len(sigmavdisp3_r) == 0: sigmavdisp3_r.append(0)
    if len(sigmavdisp4_l) == 0: sigmavdisp4_l.append(0)
    if len(sigmavdisp4_r) == 0: sigmavdisp4_r.append(0)

    pdict['sigmavrot'] = [(np.std(sigmavrot1_l), np.std(sigmavrot1_r)),
                          (np.std(sigmavrot2_l), np.std(sigmavrot2_r)),
                          (np.std(sigmavrot3_l), np.std(sigmavrot3_r)),
                          (np.std(sigmavrot4_l), np.std(sigmavrot4_r))]
    pdict['sigmavdisp'] = [(np.std(sigmavdisp1_l), np.std(sigmavdisp1_r)),
                           (np.std(sigmavdisp2_l), np.std(sigmavdisp2_r)),
                           (np.std(sigmavdisp3_l), np.std(sigmavdisp3_r)),
                           (np.std(sigmavdisp4_l), np.std(sigmavdisp4_r))]

    if len(MaP) == 8:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]

    if len(MaP) == 9:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]
        pdict['inc'] = MaP[8]
        # inc
        sigmainc_l = [i for i in samples[:, 8] if (i - MaP[8]) < 0]
        sigmainc_r = [i for i in samples[:, 8] if (i - MaP[8]) > 0]
        if len(sigmainc_l) == 0: sigmainc_l.append(0)
        if len(sigmainc_r) == 0: sigmainc_r.append(0)
        pdict['sigmainc'] = [(np.std(sigmainc_l), np.std(sigmainc_r))]

    if len(MaP) == 10:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]
        pdict['inc'] = MaP[8]
        pdict['phi'] = MaP[9]

        # inc
        sigmainc_l = [i for i in samples[:, 8] if (i - MaP[8]) < 0]
        sigmainc_r = [i for i in samples[:, 8] if (i - MaP[8]) > 0]
        if len(sigmainc_l) == 0: sigmainc_l.append(0)
        if len(sigmainc_r) == 0: sigmainc_r.append(0)
        pdict['sigmainc'] = [(np.std(sigmainc_l), np.std(sigmainc_r))]

        # phi
        sigmaphi_l = [i for i in samples[:, 9] if (i - MaP[9]) < 0]
        sigmaphi_r = [i for i in samples[:, 9] if (i - MaP[9]) > 0]
        if len(sigmaphi_l) == 0: sigmaphi_l.append(0)
        if len(sigmaphi_r) == 0: sigmaphi_r.append(0)
        pdict['sigmaphi'] = [(np.std(sigmaphi_l), np.std(sigmaphi_r))]

    # We don't need data entry
    pdict['Data'] = None
    with open(path + '/params_model.json', 'w') as f:
        f.write(json.dumps(pdict, cls=NumpyEncoder))
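
The per-parameter blocks above that build sigmavrot and sigmavdisp can also be written as a loop. The sketch below is an illustrative equivalent (one_sided_std is a hypothetical helper, not part of the original code): for each column it splits the draws at the MaP value and takes the standard deviation of each side.

import numpy as np

def one_sided_std(samples, centre):
    # Returns a list of (left, right) standard deviations about `centre`,
    # using 0.0 when one side has no draws (matching the original behaviour).
    out = []
    for col, c in zip(np.asarray(samples).T, centre):
        left, right = col[col < c], col[col > c]
        out.append((np.std(left) if left.size else 0.0,
                    np.std(right) if right.size else 0.0))
    return out

# e.g. pdict['sigmavrot'] = one_sided_std(samples[:, 0:4], MaP[0:4])
#      pdict['sigmavdisp'] = one_sided_std(samples[:, 4:8], MaP[4:8])
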
Code example #10
def runMCMC(path, ndim, p, loglike, ptform, galname, **pdict):
    pdict = pdict['pdict']
    start = time.time()
    pdict['start'] = start

    if ndim == 8:
        nparams = '_8P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=0.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(res1,
                                     truths=np.array([
                                         pdict['vrot'][0], pdict['vrot'][1],
                                         pdict['vrot'][2], pdict['vrot'][3],
                                         pdict['vdisp'][0], pdict['vdisp'][1],
                                         pdict['vdisp'][2], pdict['vdisp'][3]
                                     ]),
                                     truth_color='black',
                                     show_titles=True,
                                     trace_cmap='viridis',
                                     connect=True,
                                     smooth=0.02,
                                     connect_highlight=range(8),
                                     labels=[
                                         r'$v_{rot,225}$', r'$v_{rot,450}$',
                                         r'$v_{rot,675}$', r'$v_{rot,900}$',
                                         r'$\sigma_{225}$', r'$\sigma_{450}$',
                                         r'$\sigma_{675}$', r'$\sigma_{900}$'
                                     ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=5,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$', r'$\phi[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))
    elif ndim == 9:
        nparams = '_9P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=0.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()
        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    elif ndim == 10:
        nparams = '_10P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()

        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$', r'$\phi[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    # Save the model data
    samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    MaP = res1['samples'][res1['logl'].tolist().index(
        max(res1['logl'].tolist()))]
    quantiles = [
        dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
        for samps in samples.T
    ]

    pdict['sigmavrot'] = [(quantiles[0][0], quantiles[0][2]),
                          (quantiles[1][0], quantiles[1][2]),
                          (quantiles[2][0], quantiles[2][2]),
                          (quantiles[3][0], quantiles[3][2])]
    pdict['sigmavdisp'] = [(quantiles[4][0], quantiles[4][2]),
                           (quantiles[5][0], quantiles[5][2]),
                           (quantiles[6][0], quantiles[6][2]),
                           (quantiles[7][0], quantiles[7][2])]
    pdict['vrot'] = [
        quantiles[0][1], quantiles[1][1], quantiles[2][1], quantiles[3][1]
    ]
    pdict['vdisp'] = [
        quantiles[4][1], quantiles[5][1], quantiles[6][1], quantiles[7][1]
    ]

    if len(quantiles) == 9:
        pdict['inc'] = quantiles[8][1]
        pdict['sigmainc'] = [(quantiles[8][0], quantiles[8][2])]

    if len(quantiles) == 10:
        pdict['inc'] = quantiles[8][1]
        pdict['sigmainc'] = [(quantiles[8][0], quantiles[8][2])]
        pdict['phi'] = quantiles[9][1]
        pdict['sigmaphi'] = [(quantiles[9][0], quantiles[9][2])]

    # We don't need data entry, waste of space
    pdict['Data'] = None
    with open(path + '/params_model.json', 'w') as f:
        f.write(json.dumps(pdict, cls=NumpyEncoder))
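
The maximum-likelihood sample ("MaP") lookup used in the two functions above can be written more directly with NumPy. An illustrative equivalent, assuming res1 is the dynesty results object from the run:

import numpy as np

# Same result as res1['samples'][res1['logl'].tolist().index(max(...))]:
# argmax picks the index of the first highest log-likelihood draw.
MaP = res1.samples[np.argmax(res1.logl)]
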
Code example #11
# Plot traces and 1-D marginalized posteriors.
tfig, taxes = dyplot.traceplot(results)

# Plot the 2-D marginalized posteriors.
cfig, caxes = dyplot.cornerplot(results)


# we can post-process results

# Extract sampling results.
samples = results.samples  # samples
weights = np.exp(results.logwt - results.logz[-1])  # normalized weights

# Compute 10%-90% quantiles.
quantiles = [dyfunc.quantile(samps, [0.1, 0.9], weights=weights)
             for samps in samples.T]

# Compute weighted mean and covariance.
mean, cov = dyfunc.mean_and_cov(samples, weights)

# Resample weighted samples.
samples_equal = dyfunc.resample_equal(samples, weights)

# Generate a new set of results with statistical+sampling uncertainties.
results_sim = dyfunc.simulate_run(results)

cfig, caxes = dyplot.cornerplot(results_sim)


# print citations specific to the configuration I am using
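# (A minimal sketch, assuming the `sampler` object is still in scope and that the
#  installed dynesty version exposes its run-specific citation list through the
#  sampler's `citations` attribute; check the dynesty docs for your version.)
print(sampler.citations)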