Example #1
def run_and_plot_models(segmentDF, adjacencyMatrix, iters, warmup):

    tobit_dict = get_tobit_dict(segmentDF)

    # TOBIT MODEL:
    t_c_params = {'adapt_delta': 0.95, 'max_treedepth': 15}
    tobit_model, tobit_fit = run_or_load_model('tobit', tobit_dict, iters,
                                               warmup, t_c_params)
    check_hmc_diagnostics(tobit_fit)

    plt.hist(tobit_fit['sigma'], bins=int(iters * 4 / 100))
    plt.title('tobit')
    tob_vars = ['sigma', 'beta_zero', 'theta']
    az.plot_trace(tobit_fit, tob_vars)

    # SPATIAL TOBIT MODEL:
    c_c_params = {'adapt_delta': 0.95, 'max_treedepth': 15}
    car_dict = add_car_info_to_dict(tobit_dict, adjacencyMatrix)
    car_model, car_fit = run_or_load_model('car', car_dict, iters, warmup,
                                           c_c_params)
    check_hmc_diagnostics(car_fit)

    plt.hist(car_fit['sigma'], bins=int(iters * 4 / 100))
    plt.title('car')
    car_vars = ['sigma', 'beta_zero', 'theta', 'alpha', 'tau']
    az.plot_trace(car_fit, compact=False, var_names=car_vars)

    az.plot_pair(car_fit, ['tau', 'alpha', 'sigma'], divergences=True)
    plt.scatter(car_fit['lp__'], car_fit['sigma'])
    plt.hist(car_fit['phi'].mean(axis=0), bins=50)
def az_mu_sigma_plot(stan_fit):
    """
        Function to demonstrate pystan theta convergence result through R_hat table, autocorrelation (3 chians), and trace plot
        """
    az.plot_trace(stan_fit, var_names=['sigma2', 'mu'], filter_vars="like")
    az.plot_autocorr(stan_fit, var_names=['sigma2', "mu"])
    az.plot_pair(stan_fit, var_names=['sigma2', "mu"], divergences=True)
Example #3
def param_posterior_arviz_plots(inferred, variables):
    az.plot_posterior(inferred, var_names=variables, kind='hist')
    az.plot_pair(inferred,
                 var_names=variables,
                 kind='hexbin',
                 colorbar=True,
                 divergences=True)
def __main__():
    tobit_data = prepare_tobit_data()
    ad_matrix = get_students_adjacency(tobit_data)
    # here, try any of the models defined before.
    fit, model = scaled_spare_car(tobit_data, ad_matrix)

    # y_cens look ok though
    # tau quite large -> 23, close to the largest „friend_group“
    # larger phis now :) in negative and positive!

    # investigate: can I use the normal_l(c)cdf function?
    # fit, model = tobit_simple_model(tobit_data, scaled=True)
    # fit, model = tobit_cum_sum_scaled(tobit_data)

    # fit, model = tobit_vec_QR(tobit_data)
    # note: this yields expected values for β, but throws warnings for:
    # - Rhat (though everything is 1)
    # -

    az.plot_trace(fit, compact=True)
    az.plot_pair(fit, ['tau', 'alpha', 'sigma'], divergences=True)
    # It seems like there are a lot of divergences where:
    # - sigma is below 0.0025
    # - alpha > 0.99 (which would imply IAR)
    # -> constraining helped a bit, but there are still regions around sigma = 0.08 and 0.04
    #    (see the divergence-inspection sketch after this function)
    az.plot_energy(fit)
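
# A minimal sketch of how the divergence pattern noted in the comments above could be
# checked numerically, assuming `fit` is the PyStan fit from __main__ and that the
# model exposes scalar 'sigma' and 'alpha' parameters.
def inspect_divergences(fit):
    idata = az.from_pystan(posterior=fit)
    diverging = idata.sample_stats["diverging"].values.flatten()
    sigma = idata.posterior["sigma"].values.flatten()
    alpha = idata.posterior["alpha"].values.flatten()
    print(f"{diverging.sum()} divergent draws out of {diverging.size}")
    print("divergent draws with sigma < 0.0025:", (diverging & (sigma < 0.0025)).sum())
    print("divergent draws with alpha > 0.99:", (diverging & (alpha > 0.99)).sum())
# usage (inside __main__): inspect_divergences(fit)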
def az_v_theta_plot(stan_fit):
    """
        Function to demonstrate pystan theta convergence result through R_hat table, autocorrelation (3 chians), and trace plot
        """
    az.plot_trace(stan_fit, var_names=['v', 'theta'], filter_vars="like")
    print(stan_fit.stansummary())
    az.plot_autocorr(stan_fit, var_names=["v", 'theta'])
    az.plot_pair(stan_fit, var_names=["v", 'theta'], divergences=True)
Example #6
def plot_posteriors(sed):
    '''
    Plots the posterior distributions of an SED object.

    Arguments:
    sed: An SED object. Posteriors do not have to be computed yet, but it will be much faster if they are.
    '''
    plt.rc('text', usetex=False)
    plt.rc('font', family='serif')
    az.rcParams.update({"plot.max_subplots": 50})
    az.style.use(['arviz-darkgrid', 'arviz-purplish'])

    posts = sed.get_posteriors()

    if sed.sed_model in sed.overell_models:
        etd = ['allell']
    else:
        etd = sed.ells

    models = get_data(sed.post_dir,
                      ells_to_do=etd,
                      pols_to_do=[p + '_model' for p in sed.pols])

    for e in etd:
        for p in sed.pols:
            #print(list(posts['allell'].keys()))
            tr = posts[str(e)][p]

            vnames = [n for n in tr.varnames
                      if 'interval__' not in n]  # and 'correlation' not in n]

            with models[e][p + '_model']:
                az.plot_pair(tr,
                             kind='kde',
                             divergences=False,
                             marginals=True,
                             textsize=22,
                             kde_kwargs={'contour': True},
                             var_names=vnames)

            fig = plt.gcf()
            fig.suptitle(f'SED Parameter Posteriors for ell={e}, {p} Bin',
                         fontsize=50, y=0.98)

            if not os.path.exists('./sed_plots'):
                os.mkdir('./sed_plots')

            fname = f'./sed_plots/{e}_{p}_posteriors.png'
            print(f'Saving {fname}')
            plt.savefig(fname)
            plt.close(fig)

    return None
Example #7
def az_v_sigma2_plot(stan_fit, var_list=['v', 'sigma2']):
    """
        Function to demonstrate pystan v convergence result through R_hat table, autocorrelation (3 chians), and trace plot
        """

    #        print(az.summary(stan_fit, var_names=["v","sigma2",'W'], filter_vars="like"))
    print(az.summary(stan_fit, var_names=var_list + ['W']))
    #        az.plot_trace(stan_fit, var_names=['v','sigma2'], filter_vars="like")
    az.plot_trace(stan_fit, var_names=var_list)
    az.plot_autocorr(stan_fit, var_names=var_list)

    az.plot_pair(stan_fit, var_names=var_list, divergences=True)
Example #8
    def plot_corner(self, point_estimate='mean', plotfile=None, show=True):
        """ Plot the 1D and 2D marginal distributions of the inferred parameters

        Parameters
        ----------
        point_estimate : str, optional, default 'mean'
            Point estimate to mark on the 2D marginal plots
        plotfile : str, optional
            Name of a file to write the plot to
        show : bool, optional, default True
            Whether to show the plot window

        """
        # For consistency's sake I'm going to re-invent the wheel here and manually
        # create a grid of plots from arviz, rather than letting corner do the work,
        # because I want to make sure specific entries are plotted in a specific order.
        
        plot_vars = self.plot_trace_vars#[:-1]
        chol_coords = []
        if self.ndim == 2:
            #chol_coords.append(0)
            #chol_coords.append(1)
            chol_coords=(0,1)
            coords = {"chol_corr_dim_0":[0], "chol_corr_dim_1":[1]}
            #plot_vars.append("chol_corr[0,1]")
        else:
            coords = {"chol_corr_dim_0":[], "chol_corr_dim_1":[]}
            d0 = []
            d1 = []
            #raise NotImplementedError("Corner plots for data with more than 2 dimensions are not available yet!")
            for i in range(self.ndim - 1):
                for j in range(i + 1, self.ndim):
                    d0.append(i)
                    d1.append(j)
                    #print(i,j)
                    #chol_coords.append([i,j])#"chol_corr["+str(i)+","+str(j)+"]")

            coords["chol_corr_dim_0"] = xr.DataArray(d0, dims=['pointwise_sel'])
            coords["chol_corr_dim_1"] = xr.DataArray(d1, dims=['pointwise_sel'])
        #print(plot_vars)
        #coords = {"chol_corr":chol_coords}
        #print(coords)
        #corner = gs.GridSpec(rows, cols, figure=fig
        az.plot_pair(self.trace,
                     var_names = plot_vars,
                     coords = coords,
                     kind="kde",
                     marginals=True,
                     point_estimate=point_estimate,
                     show=show,
            )

        if isinstance(plotfile, str) and not show:
            plt.savefig(plotfile)
        elif not show:
            raise TypeError("plotfile must be a string")
def plot_pair_evolution(params, mcmc_kernel):

    files = []
    for file in os.listdir("./results"):
        if file.startswith("output_it"):
            files.append(file)
    files = sorted(files, key=lambda x: int(x[9:-4]))
    arvzs, cs = [], []

    for i, f in enumerate(files):
        with open(f"./results/{f}", "rb") as obj:
            i += 1
            samples, stats = pickle.load(obj)
            if mcmc_kernel == "hmc":
                stats_names = [
                    "logprob", "diverging", "acceptance", "step_size"
                ]
            elif mcmc_kernel == "nuts":
                stats_names = [
                    "logprob",
                    "tree_size",
                    "diverging",
                    "energy",
                    "acceptance",
                    "mean_tree_accept",
                ]
            sample_stats = {k: v for k, v in zip(stats_names, stats)}
            var_names = [p.name for p in params]
            posterior = {k: v for k, v in zip(var_names, samples)}
            arvzs.append(
                az.from_dict(posterior=posterior, sample_stats=sample_stats))
            cs.append(i / len(files))

    ax = az.plot_pair(
        arvzs[0],
        kind="scatter",
        marginals=True,
        marginal_kwargs={"color": cm.hot_r(cs[0])},
        scatter_kwargs={"c": cm.hot_r(cs[0])},
    )
    for arvz, c in zip(arvzs[1:], cs[1:]):  # skip the first file, already plotted above
        az.plot_pair(
            arvz,
            kind="scatter",
            marginals=True,
            marginal_kwargs={"color": cm.hot_r(c)},
            scatter_kwargs={"c": cm.hot_r(c)},
            ax=ax,
        )

    fig = ax.ravel()[0].figure
    fig.savefig("./results/pair_plot_evo.png")
def create_diagnostic_plots(idx,pdf_filename,fit,diag_pars,niter,nchain):

    # Converting the Stan fit object to ArviZ InferenceData
    samples   = fit.extract(permuted=True) # Extracting parameter samples
    data      = az.from_pystan(fit)
    tmp       = data.posterior
    var_names = list(tmp.data_vars)

    # Filtering the list of parameters to plot
    unwanted  = {'losvd','spec','conv_spec','poly','bestfit','losvd_','losvd_mod','spec_pred','log_likelihood'}
    vars_main = [e for e in var_names if e not in unwanted]
   
    # Reading diagnostic parameters
    accept_stat, stepsize,  treedepth = np.zeros((niter,nchain)), np.zeros((niter,nchain)) , np.zeros((niter,nchain))
    n_leapfrog,  divergent, energy    = np.zeros((niter,nchain)), np.zeros((niter,nchain)) , np.zeros((niter,nchain))  
    for j in range(nchain):
        accept_stat[:,j] = diag_pars[j]['accept_stat__']
        stepsize[:,j]    = diag_pars[j]['stepsize__']
        treedepth[:,j]   = diag_pars[j]['treedepth__']
        n_leapfrog[:,j]  = diag_pars[j]['n_leapfrog__']
        divergent[:,j]   = diag_pars[j]['divergent__']
        energy[:,j]      = diag_pars[j]['energy__']    
 
    # Creating the plots across multiple PDF pages
    pdf_pages = PdfPages(pdf_filename)

    print(" - Sampler params")
    plot_sampler_params(idx,accept_stat,stepsize,treedepth,n_leapfrog,divergent,energy)
    pdf_pages.savefig()
    print(" - Chains")
    plot_chains(samples,vars_main)
    pdf_pages.savefig()
   #  print(" - Trace plot [Main params]")
   #  az.plot_trace(data, var_names=vars_main)
   #  pdf_pages.savefig()
   #  print(" - Trace plot [LOSVD]")
   #  az.plot_trace(data, var_names=['losvd'])
   #  pdf_pages.savefig()
    print(" - Pair plot")
    az.plot_pair(data, var_names=vars_main, divergences=True, kind='kde', fill_last=False)
    pdf_pages.savefig()
    print(" - Autocorr plot")
    az.plot_autocorr(data, var_names=vars_main)
    pdf_pages.savefig()
    print(" - Energy plot")
    az.plot_energy(data)
    pdf_pages.savefig()
    pdf_pages.close()   

    return
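
# A minimal sketch of how this could be invoked with a PyStan 2 fit: the per-chain
# sampler diagnostics expected in `diag_pars` come from get_sampler_params, and
# niter/nchain are assumed to match the sampling call; idx and the filename are
# illustrative placeholders.
# diag_pars = fit.get_sampler_params(inc_warmup=False)
# create_diagnostic_plots(idx=0, pdf_filename="diagnostics.pdf", fit=fit,
#                         diag_pars=diag_pars, niter=1000, nchain=len(diag_pars))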
Example #11
 def plot_model_quality(self, var_names=None, **kwargs):
     assert hasattr(self, 'trace'), 'Run bayesian fitting first!'
     az.plot_trace(self.trace, var_names=var_names, show=True, **kwargs)
     try:
         az.plot_pair(
             self.trace,
             var_names=var_names,
             kind="hexbin",
             # coords=coords,
             colorbar=False,
             divergences=True,
             # backend="bokeh",
         )
     except ZeroDivisionError as e:
         print(e)
Example #12
 def plot_pairs(self):
     if not (self.mcmc_ and self.data_):
         raise AttributeError('Object needs to be fit first.')
     else:
         _ = az.plot_pair(  # NOQA
             self.data_,
             var_names=['mu', 'sigma', 'log_nu'],
             figsize=(10, 10))
         plt.show()
Example #13
 def plot_pair(self, var_names, figsize=(6, 4)):
     ax = az.plot_pair(
         self.idata,
         var_names=var_names,
         kind=["scatter", "kde"],
         kde_kwargs={"fill_last": False},
         marginals=True,
         # coords=coords,
         point_estimate="mean",
         figsize=figsize,
     )
Example #14
def analyze_post(post, method):
    print_summary(post, 0.95, False)
    fig, ax = plt.subplots()
    az.plot_forest(post, hdi_prob=0.95, figsize=(10, 4), ax=ax)
    plt.title(method)
    pml.savefig(f'multicollinear_forest_plot_{method}.pdf')
    plt.show()

    # post = m6_1.sample_posterior(random.PRNGKey(1), p6_1, (1000,))
    fig, ax = plt.subplots()
    az.plot_pair(post, var_names=["br", "bl"],
                 scatter_kwargs={"alpha": 0.1}, ax=ax)
    pml.savefig(f'multicollinear_joint_post_{method}.pdf')
    plt.title(method)
    plt.show()

    sum_blbr = post["bl"] + post["br"]
    fig, ax = plt.subplots()
    az.plot_kde(sum_blbr, label="sum of bl and br", ax=ax)
    plt.title(method)
    pml.savefig(f'multicollinear_sum_post_{method}.pdf')
    plt.show()
Example #15
def plot_param_diagnostics(mod,
                           incl_noise_params=False,
                           incl_trend_params=False,
                           incl_smooth_params=False,
                           which='trace',
                           **kwargs):
    """
    Parameters
    -----------
    mod : orbit model object
    which : str, {'density', 'trace', 'pair', 'autocorr', 'posterior', 'forest'}
    incl_noise_params : bool
        if plot noise parameters; default False
    incl_trend_params : bool
        if plot trend parameters; default False
    incl_smooth_params : bool
        if plot smoothing parameters; default False
    **kwargs :
        other parameters passed to arviz functions

    Returns
    -------
        matplotlib axes object
    """
    posterior_samples = get_arviz_plot_dict(
        mod,
        incl_noise_params=incl_noise_params,
        incl_trend_params=incl_trend_params,
        incl_smooth_params=incl_smooth_params)

    if which == "trace":
        axes = az.plot_trace(posterior_samples, **kwargs)
    elif which == "density":
        axes = az.plot_density(posterior_samples, **kwargs)
    elif which == "posterior":
        axes = az.plot_posterior(posterior_samples, **kwargs)
    elif which == "pair":
        axes = az.plot_pair(posterior_samples, **kwargs)
    elif which == "autocorr":
        axes = az.plot_autocorr(posterior_samples, **kwargs)
    elif which == "forest":
        axes = az.plot_forest(posterior_samples, **kwargs)
    else:
        raise Exception(
            "please use one of 'trace', 'density', 'posterior', 'pair', 'autocorr', 'forest' for which."
        )

    return axes
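
# A minimal usage sketch, assuming `mod` is a fitted orbit model object as described
# in the docstring above; extra keyword arguments are forwarded to the chosen arviz
# plotting function.
def example_param_diagnostics(mod):
    # trace plot including the trend parameters
    plot_param_diagnostics(mod, incl_trend_params=True, which='trace')
    # hexbin pair plot of the sampled parameters (kind is forwarded to az.plot_pair)
    plot_param_diagnostics(mod, which='pair', kind='hexbin')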
Example #16
def sampled_variables_scatterplot(c):
    az.plot_pair(c,
                 var_names=[
                     'H_p', 'Om', 'w0', 'MMax_d_p', 'MMax_d_p_2sigma', 'alpha',
                     'beta', 'gamma'
                 ])
# Fit posterior with MCMC instead of analytically (for simplicity and flexibility)
# This is the same as BAP code, except we fix the noise variance to a constant.

with pm.Model() as model_g:
    w0 = pm.Normal('w0', mu=0, sd=10)
    w1 = pm.Normal('w1', mu=0, sd=1)
    #ϵ = pm.HalfCauchy('ϵ', 5)
    mu = pm.Deterministic('mu', w0 + w1 * x)
    #y_pred = pm.Normal('y_pred', mu=μ, sd=ϵ, observed=y)
    y_pred = pm.Normal('y_pred', mu=mu, sd=noiseSD, observed=y)
    trace_g = pm.sample(1000, cores=1, chains=2)

az.plot_trace(trace_g, var_names=['w0', 'w1'])

az.plot_pair(trace_g, var_names=['w0', 'w1'], plot_kwargs={'alpha': 0.1})
pml.savefig('linreg_2d_bayes_post_noncentered_data.pdf')
plt.show()

# To reduce the correlation between the intercept and slope (w0 and w1), we can center the data
x = x_orig - x_orig.mean()

# or standardize the data
#x = (x - x.mean())/x.std()
#y = (y - y.mean())/y.std()

with pm.Model() as model_g_centered:
    w0 = pm.Normal('w0', mu=0, sd=10)
    w1 = pm.Normal('w1', mu=0, sd=1)
    #ϵ = pm.HalfCauchy('ϵ', 5)
    mu = pm.Deterministic('mu', w0 + w1 * x)
    y_pred = pm.Normal('y_pred', mu=mu, sd=noiseSD, observed=y)
    trace_g_centered = pm.sample(1000, cores=1, chains=2)

df_stats_summary = az.summary(
    seirdpq_trace_calibration,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,
)
calibration_variable_modes = calculate_rv_posterior_mpv(
    pm_trace=seirdpq_trace_calibration,
    variable_names=calibration_variable_names)
df_stats_summary = add_mpv_to_summary(df_stats_summary,
                                      calibration_variable_modes)
df_stats_summary.to_csv("stats_summary_calibration.csv")
print(df_stats_summary)

az.plot_pair(
    seirdpq_trace_calibration,
    var_names=calibration_variable_names[1:],
    kind="hexbin",
    fill_last=False,
    figsize=(10, 8),
)
plt.savefig("seirpdq_marginals_cal.png")

# %%
percentile_cut = 2.5

y_min = np.percentile(seirdpq_trace_calibration["seirpdq_model"],
                      percentile_cut,
                      axis=0)
y_max = np.percentile(seirdpq_trace_calibration["seirpdq_model"],
                      100 - percentile_cut,
                      axis=0)
y_fit = np.percentile(seirdpq_trace_calibration["seirpdq_model"], 50, axis=0)
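
# A possible follow-up sketch: the percentile curves computed above can be drawn as a
# 95% credible band around the median prediction. The time axis is a hypothetical
# placeholder here, and a single observed series (1-D model output) is assumed.
t = np.arange(len(y_fit))
plt.figure()
plt.plot(t, y_fit, label="median prediction")
plt.fill_between(t, y_min, y_max, alpha=0.3, label="95% credible band")
plt.legend()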
Example #19
"""
Hexbin PairPlot
===============

_thumb: .2, .5
"""
import matplotlib.pyplot as plt

import arviz as az

az.style.use("arviz-darkgrid")

centered = az.load_arviz_data("centered_eight")

coords = {"school": ["Choate", "Deerfield"]}
az.plot_pair(
    centered,
    var_names=["theta", "mu", "tau"],
    kind="hexbin",
    coords=coords,
    colorbar=True,
    divergences=True,
)
plt.show()
Example #20
"""
KDE Pair Plot
=============

_thumb: .2, .5
"""
import arviz as az

az.style.use('arviz-darkgrid')

centered = az.load_arviz_data('centered_eight')

coords = {'school': ['Choate', 'Deerfield']}
az.plot_pair(centered,
             var_names=['theta', 'mu', 'tau'],
             kind='kde',
             coords=coords,
             divergences=True,
             textsize=22)
Example #22
"""
Joint Plot
==========

_thumb: .5, .8
"""
import matplotlib.pyplot as plt

import arviz as az

az.style.use("arviz-darkgrid")

data = az.load_arviz_data("non_centered_eight")

az.plot_pair(
    data,
    var_names=["theta"],
    coords={"school": ["Choate", "Phillips Andover"]},
    kind="hexbin",
    marginals=True,
    figsize=(10, 10),
)
plt.show()
with pm.Model() as model_g:
    α = pm.Normal('α', mu=0, sd=10)
    β = pm.Normal('β', mu=0, sd=1)
    ϵ = pm.HalfCauchy('ϵ', 5)
    μ = pm.Deterministic('μ', α + β * x)
    y_pred = pm.Normal('y_pred', mu=μ, sd=ϵ, observed=y)
    trace_g = pm.sample(2000, tune=1000)


# In[6]:


az.plot_trace(trace_g, var_names=['α', 'β', 'ϵ'])
plt.savefig('B11197_03_03.png', dpi=300)


# ### Modifying the data before running the models

# In[7]:


az.plot_pair(trace_g, var_names=['α', 'β'], plot_kwargs={'alpha': 0.1})
plt.savefig('B11197_03_04.png', dpi=300)


# ### Interpreting the posterior

# In[8]:


plt.plot(x, y, 'C0.')

alpha_m = trace_g['α'].mean()
beta_m = trace_g['β'].mean()

draws = range(0, len(trace_g['α']), 10)
plt.plot(x, trace_g['α'][draws] + trace_g['β'][draws] * x[:, np.newaxis],
         c='gray', alpha=0.5)
Example #24
plt.legend(fontsize=16)
plt.tick_params(labelsize=16)

plt.savefig("npz/results.pdf", bbox_inches="tight", pad_inches=0.0)
plt.savefig("npz/results.png", bbox_inches="tight", pad_inches=0.0)

# ArviZ part
import arviz
rc = {
    "plot.max_subplots": 1024,
}

try:
    arviz.rcParams.update(rc)
    arviz.plot_pair(arviz.from_numpyro(mcmc),
                    kind='kde',
                    divergences=False,
                    marginals=True)
    plt.savefig("npz/cornerall.png")
except Exception as e:
    print("failed corner:", e)

try:
    pararr = [
        "Mp", "Rp", "T0", "alpha", "MMR_CO", "MMR_H2O", "vsini", "RV", "q1",
        "q2", "logtau", "loga", "sigma"
    ]
    arviz.plot_trace(mcmc, var_names=pararr)
    plt.savefig("npz/trace.png")
except Exception as e:
    print("failed trace:", e)
    "Inclusion probability": "mean",
    "is_significant": "sum"
})

print(sig_table)

#%%

types = ["k__Bacteria;p__Proteobacteria", "k__Bacteria;p__FBP"]

ax = az.plot_pair(
    res_all,
    kind=["scatter", "kde"],
    kde_kwargs={"fill_last": False},
    marginals=True,
    coords={
        "chain": [33],
        "cell_type": types,
        "cell_type_nb": types
    },
    point_estimate="median",
)

plt.show()

#%%

az.rhat(res_all)

#%%

az.summary(res_all)
Example #26
 def plot_pair(self, **kwargs):
     """Pair plots of the posterior parameters."""
     return az.plot_pair(self.data, **kwargs)
Example #27
import arviz
from ckbit import rxn_ord

#Import data
file = './RO_data.xlsx'

#Run MAP estimation with standard priors
map1 = rxn_ord.MAP(filename=file)

#Run MCMC estimation with standard priors
m1, m2 = rxn_ord.MCMC(filename=file,control={'adapt_delta':0.99999999, 
                            'max_treedepth':100}, iters=1000, chains=2)

#Generate pairplot
arviz.plot_pair(m1)

#Run VI estimation with standard priors
v1, v2 = rxn_ord.VI(filename=file)

#Process data
data_dict={'intercept':v1['sampler_params'][0], 
           'rxn_ord':v1['sampler_params'][1], 
           'sigma':v1['sampler_params'][2]} 

#Generate pairplot
arviz.plot_pair(data_dict)

#Run MCMC estimation with specified priors
p1, p2 = rxn_ord.MCMC(filename=file,control={'adapt_delta':0.99999999,
                           'max_treedepth':100}, iters=1000,
Example #28
        P_mumu = 1 - 4 * (s23**2) * (c12**2) * (
            (s12**2) + (c12**2) * (c23**2)) * (theano.tensor.sin(delta31))**2

        P_mue_REAL = (c13**2)*(-(s13**2)*(s23**2)*(c12**2) -(c23*c12*s23*s13*s12  * theano.tensor.cos(delta)) \
                               -(s12**2)*(s13**2)*(s23**2) +((s12**2)*c23*s23*s13 * theano.tensor.cos(delta)))

        P_mue_IMAG = (c13**2)*(c23*c12*s23*s13*s12*theano.tensor.sin(delta)) \
                              -(s12**2)*(c23*s23*s13*theano.tensor.sin(delta))

        P_mue = -4 * P_mue_REAL * (theano.tensor.sin(
            delta31)**2) + 2 * P_mue_IMAG * theano.tensor.sin(2 * delta31)

        P_obs_mumu = pm.Normal("prob-MuMu",
                               mu=P_mumu,
                               observed=np.random.normal(0.27, 0.01, 200))
        P_obs_mue = pm.Normal("prob-MuE",
                              mu=P_mue,
                              observed=np.random.normal(0.40, 0.01, 200))

        # step = pm.Slice()  # we use NUTS as recommended
        trace = pm.sample(tune=1000, draws=5000)  # ,return_inferencedata=True)

    az.plot_trace(trace)
    az.plot_pair(trace, divergences=True)
    """
    ax = plt.subplot()
    ax.hist(np.random.normal(0.27,0.02,500),bins = 30)
    ax.set_xlim(xmin = 0, xmax = 1)
    plt.show()
    az.summary(trace,round_to=2)
    """
"""
Point Estimate Pairplot
=======================

_thumb: .2, .5
"""
import matplotlib.pyplot as plt
import arviz as az


centered = az.load_arviz_data("centered_eight")

coords = {"school": ["Choate", "Deerfield"]}
ax = az.plot_pair(
    centered,
    var_names=["mu", "theta"],
    kind=["scatter", "kde"],
    kde_kwargs={"fill_last": False},
    diagonal=True,
    coords=coords,
    point_estimate="median",
    figsize=(10, 8),
    backend="bokeh",
)

plt.show()
Example #30
"""
Pair Plot
=========

_thumb: .2, .5
"""
import arviz as az

centered = az.load_arviz_data("centered_eight")

coords = {"school": ["Choate", "Deerfield"]}
ax = az.plot_pair(
    centered,
    var_names=["theta", "mu", "tau"],
    coords=coords,
    divergences=True,
    backend="bokeh",
)