Example #1
def exercise4():
    with pm.Model() as basic_model:
        probabilities = [0.3, 0.7, 0.95]

        # Map each group's base probability to its likelihood parameter,
        # p = (1 + 2 * prob) / 3.
        likelihood_params = np.array(
            [np.divide(1, 3) * (1 + 2 * prob) for prob in probabilities])

        # Equal weights give a uniform prior over the three groups.
        group = pm.Categorical('group', p=np.array([1, 1, 1]))

        # Select the likelihood parameter that matches the sampled group.
        p = pm.Deterministic('p', theano.shared(likelihood_params)[group])

        # num_questions is assumed to be defined in the enclosing scope.
        positive_answers = pm.Binomial('positive_answers',
                                       n=num_questions,
                                       p=p,
                                       observed=[7])

        trace = pm.sample(4000, progressbar=True)

        az.plot_trace(trace)

        plt.show()

        az.plot_posterior(trace)

        plt.show()

        az.summary(trace)
        return trace
def az_mu_sigma_plot(stan_fit):
    """
        Function to demonstrate pystan theta convergence result through R_hat table, autocorrelation (3 chians), and trace plot
        """
    az.plot_trace(stan_fit, var_names=['sigma2', 'mu'], filter_vars="like")
    az.plot_autocorr(stan_fit, var_names=['sigma2', "mu"])
    az.plot_pair(stan_fit, var_names=['sigma2', "mu"], divergences=True)
Example #3
def __main__():
    tobit_data = prepare_tobit_data()
    ad_matrix = get_students_adjacency(tobit_data)
    # here, try any of the models defined before.
    fit, model = scaled_spare_car(tobit_data, ad_matrix)

    # y_cens looks ok though
    # tau quite large -> 23, close to the largest "friend_group"
    # larger phis now :) both negative and positive!

    # investigate: can I use the normal_l(c)cdf function?
    # fit, model = tobit_simple_model(tobit_data, scaled=True)
    # fit, model = tobit_cum_sum_scaled(tobit_data)

    # fit, model = tobit_vec_QR(tobit_data)
    # note: this yields the expected values for β, but throws warnings for:
    # - Rhat (though everything is 1)
    # -

    az.plot_trace(fit, compact=True)
    az.plot_pair(fit, ['tau', 'alpha', 'sigma'], divergences=True)
    # seems like I'm having a lot of divergences where:
    # - sigma below 0.0025
    # - alpha > 0.99 (would imply IAR)
    # -> constraining helped a bit, but there are still divergence regions around sigma = 0.08 and 0.04
    az.plot_energy(fit)
Example #4
def run(n_samples=1000):
    model = build_model()
    with model:
        trace = pm.sample(draws=n_samples, tune=1000, target_accept=0.99)

    az.plot_trace(trace)
    az.plot_forest(trace)
Example #5
def bayes_multiple_detector_I(t, s, n, tracename):
    with pm.Model() as abrupt_model:
        sigma = pm.Normal('sigma', mu=30, sigma=5)
        # sigma = pm.Uniform('sigma', 5, 15)
        mu = pm.Uniform("mu1", -30, 30)
        tau = pm.DiscreteUniform("tau" + "1", t.min(), t.max())

        for i in np.arange(2, n + 2):
            _mu = pm.Uniform("mu" + str(i), -100, 0)
            mu = T.switch(tau >= t, mu, _mu)
            if i < (n + 1):
                tau = pm.DiscreteUniform("tau" + str(i), tau, t.max())
        # add random walk
        # sigma_rw = pm.Uniform("sigma_rw", 0, 10)
        g_rw = pm.GaussianRandomWalk("g_rw", tau=1, shape=len(s))
        s_obs = pm.Normal("s_obs", mu=g_rw + mu, sigma=sigma, observed=s)
    # g = pm.model_to_graphviz(abrupt_model)
    # g.view()
    with abrupt_model:
        pm.find_MAP()
        trace = pm.sample(5000, tune=1000)
        az.plot_trace(trace)
        plt.show()
        az.plot_autocorr(trace)
        plt.show()
        az.to_netcdf(trace, getpath('tracepath') + tracename)
        pm.summary(trace)
    return trace
def bayes_multiple_detector_each_sigma(t, s, n):
    scala = 1000
    with pm.Model() as abrupt_model:
        sigma = pm.Normal('sigma', mu=0.02 * scala, sigma=0.015 * scala)
        # sigma = pm.Uniform('sigma', 5, 15)
        mu = pm.Uniform("mu1", -1.5 * scala, -1.4 * scala)
        tau = pm.DiscreteUniform("tau" + "1", t.min(), t.max())

        for i in np.arange(2, n + 2):
            _mu = pm.Uniform("mu" + str(i), -1.6 * scala, -1.4 * scala)
            mu = T.switch(tau >= t, mu, _mu)
            if i < (n + 1):
                ttau = pm.DiscreteUniform("tau" + str(i), tau, t.max())
                tau = ttau

        tau1 = abrupt_model["tau1"]
        tau2 = abrupt_model["tau2"]
        dtau = pm.DiscreteUniform('dtau', tau1 + 500, tau2)

        s_obs = pm.Normal("s_obs", mu=mu, sigma=sigma, observed=s)
    g = pm.model_to_graphviz(abrupt_model)
    g.view()
    with abrupt_model:
        # pm.find_MAP()
        trace = pm.sample(20000, tune=5000)
        az.plot_trace(trace)
        az.to_netcdf(trace, getpath('tracepath') + 'bd9_4_add_new_rule')
        plt.show()
        pm.summary(trace)
    return trace
Example #7
    def plotting(self):
        ## predictive checks: prior, then posterior
        az.plot_ppc(self.m_idata, num_pp_samples = 100, group = "prior")
        az.plot_ppc(self.m_idata, num_pp_samples = 100)

        ## plot trace
        az.plot_trace(self.m_idata)
Example #8
def basic_test():

    # Initialize model
    with pm.Model() as model:

        # E.g., to define a flat prior
        # with some limits
        #z = pm.Uniform('z', lower=0.0, upper=3.0)

        # prior
        mu = pm.Normal('mu', mu=0, sigma=1)

        # Observed data
        obs = pm.Normal('obs', mu=mu, sigma=1, observed=np.random.randn(1000))

        # Run sampler
        idata = pm.sample(2000, tune=1500, return_inferencedata=True)

    print(idata.posterior.dims)

    az.plot_trace(idata)

    summary = az.summary(idata)

    print("Summary:")
    print(summary)

    plt.show()

    return None
Example #9
def run_and_plot_models(segmentDF, adjacencyMatrix, iters, warmup):

    tobit_dict = get_tobit_dict(segmentDF)

    # TOBIT MODEL:
    t_c_params = {'adapt_delta': 0.95, 'max_treedepth': 15}
    tobit_model, tobit_fit = run_or_load_model('tobit', tobit_dict, iters,
                                               warmup, t_c_params)
    check_hmc_diagnostics(tobit_fit)

    plt.hist(tobit_fit['sigma'], bins=int(iters * 4 / 100))
    plt.title('tobit')
    tob_vars = ['sigma', 'beta_zero', 'theta']
    az.plot_trace(tobit_fit, tob_vars)

    # SPATIAL TOBIT MODEL:
    c_c_params = {'adapt_delta': 0.95, 'max_treedepth': 15}
    car_dict = add_car_info_to_dict(tobit_dict, adjacencyMatrix)
    car_model, car_fit = run_or_load_model('car', car_dict, iters, warmup,
                                           c_c_params)
    check_hmc_diagnostics(car_fit)

    plt.hist(car_fit['sigma'], bins=int(iters * 4 / 100))
    plt.title('car')
    car_vars = ['sigma', 'beta_zero', 'theta', 'alpha', 'tau']
    az.plot_trace(car_fit, compact=False, var_names=car_vars)

    az.plot_pair(car_fit, ['tau', 'alpha', 'sigma'], divergences=True)
    plt.scatter(car_fit['lp__'], car_fit['sigma'])
    plt.hist(car_fit['phi'].mean(axis=0), bins=50)
Example #10
def traceplot(*args, **kwargs):
    try:
        kwargs.setdefault('compact', True)
        return az.plot_trace(*args, **kwargs)
    except TypeError:
        kwargs.pop('compact')
        return az.plot_trace(*args, **kwargs)
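A minimal usage sketch for the wrapper above. The data here is ArviZ's bundled eight-schools example (an assumption about what a caller might pass); any InferenceData works, and compact=True is applied unless the installed ArviZ version rejects the keyword.

import arviz as az
import matplotlib.pyplot as plt

# Bundled example dataset; stands in for a real sampling result.
idata = az.load_arviz_data("centered_eight")

# Same call signature as az.plot_trace, with compact=True by default.
traceplot(idata, var_names=["mu", "tau"])
plt.show()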
Example #11
def problem_4(data):
    sm = pystan.StanModel(file='model_gq.stan')
    d = {'N': len(data), 'X': data, 'c': 30}
    fit = sm.sampling(d)
    print(fit.stansummary())
    arviz.plot_trace(fit,
                     var_names=("Var", "Coeff", "DeltaC", "QFPer", "ObsSec",
                                "RatioC"))
def az_v_theta_plot(stan_fit):
    """
        Function to demonstrate pystan theta convergence result through R_hat table, autocorrelation (3 chians), and trace plot
        """
    az.plot_trace(stan_fit, var_names=['v', 'theta'], filter_vars="like")
    print(stan_fit.stansummary())
    az.plot_autocorr(stan_fit, var_names=["v", 'theta'])
    az.plot_pair(stan_fit, var_names=["v", 'theta'], divergences=True)
def model10(path):
    #read_single_data()
    single_arrhenius_mi_code="""
    data { 
        int<lower=0> N;
        vector<lower=0>[N] s;
        vector<lower=0>[N] t;
        vector[N] y;
    }
    parameters {
        real<lower=0> phi0;
        real<lower=0> phi1;
        real<lower=0> beta;
        real<lower=0> sigmasq_eta;
    }
    transformed parameters {
        real<lower=0> sigma_eta;
        sigma_eta=sqrt(sigmasq_eta);

    }
    model {
        vector[N] ypred;
        phi0~normal(2,0.5);
        phi1~normal(13,3);
        beta~normal(0.5,0.1);
        for(i in 1:N)
            ypred[i]=phi0*exp(-phi1/s[i])*(t[i]^beta);
        y ~ normal(ypred, sigma_eta);
    }
    """
    data=pd.read_csv(path,encoding='GBK',header=None)
    TT=data.values.tolist()[0]
    SS=data.values.tolist()[1]
    YY=data.values.tolist()[2]
    plt.scatter(TT, YY,c='blue',s=1,alpha=0.3)
    plt.title('Degradation Diagram')  # chart title
    plt.xlabel('time')  # x-axis label
    plt.ylabel('degradation data')  # y-axis label
    plt.show()

    single_arrhenius_mi_data = {"N": len(YY),
                                 "s": SS,
                                 "t": TT,
                                 "y":YY
                                 }
    sm = pystan.StanModel(model_code=single_arrhenius_mi_code)
    fit = sm.sampling(data=single_arrhenius_mi_data,chains=4, iter=1000)
    print("fit=",fit)
    all_parameters=fit.extract(permuted=True)
    print("all_parameters=",all_parameters)
    phi0=all_parameters['phi0']
    print("phi0=",phi0) #a的结果
    b=fit.extract(permuted=False) #b是未排序的参数,用于绘图
    print("b=",b)
    arviz.plot_trace(fit)
    #,pars={"phi0","phi1","beta","sigma_eta","sigmasq_eta"}
    plt.show()
    print("model10:successful")
Example #14
def problem_3(data):
    sm = pystan.StanModel(file='model.stan')
    d = {'N': len(data), 'X': data}
    fit = sm.sampling(d)
    print(
        fit.stansummary(probs=(alpha / 2, alpha, 0.5, 1 - alpha,
                               1 - alpha / 2),
                        digits_summary=2))
    arviz.plot_trace(fit)
Example #15
def mcmc_traceplot(key, val, title=None, fpath=None):
    az.plot_trace({key: val})
    fig = plt.gcf()
    if title is not None:
        fig.suptitle(title)  # , fontsize='x-large', y=1.06)

    if fpath is not None:
        savefig(fig, fpath)

    return fig
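A short usage sketch for mcmc_traceplot: the draws below are synthetic, and fpath is left as None so the external savefig helper is not exercised.

import numpy as np
import matplotlib.pyplot as plt

# Fake posterior draws shaped (chains, draws); ArviZ treats this as two chains.
draws = np.random.normal(loc=0.0, scale=1.0, size=(2, 1000))
fig = mcmc_traceplot("mu", draws, title="Trace of mu")
plt.show()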
Example #16
    def plot(self,
             type: str = 'dist',
             credible_interval: float = 0.94,
             point_estimate: str = 'mean',
             bins: Union[int, Sequence, str] = 'auto',
             round_to: int = 2,
             **kwargs):
        """General purpose plotting for hbayesdm-py.

        This function plots hyper-parameters.

        Parameters
        ----------
        type
            Current options are: 'dist', 'trace'. Defaults to 'dist'.
        credible_interval
            Credible interval to plot. Defaults to 0.94.
        point_estimate
            Show point estimate on plot.
            Options are: 'mean', 'median' or 'mode'. Defaults to 'mean'.
        bins
            Controls the number of bins. Defaults to 'auto'.
            Accepts the same values (or keywords) as plt.hist() does.
        round_to
            Controls formatting for floating point numbers. Defaults to 2.
        **kwargs
            Passed as-is to plt.hist().
        """
        type_options = ('dist', 'trace')
        if type not in type_options:
            raise RuntimeError('Plot type must be one of ' +
                               repr(type_options))

        if self.model_type == 'single':
            var_names = list(self.parameters_desc)
        else:
            var_names = ['mu_' + p for p in self.parameters_desc]

        if type == 'dist':
            kwargs.setdefault('color', 'black')
            axes = az.plot_posterior(self.fit,
                                     kind='hist',
                                     var_names=var_names,
                                     credible_interval=credible_interval,
                                     point_estimate=point_estimate,
                                     bins=bins,
                                     round_to=round_to,
                                     **kwargs)
            for ax, (p, desc) in zip(axes, self.parameters_desc.items()):
                ax.set_title('{} ({})'.format(p, desc))
        elif type == 'trace':
            az.plot_trace(self.fit, var_names=var_names)

        plt.show()
Example #17
def az_v_sigma2_plot(stan_fit, var_list=['v', 'sigma2']):
    """
        Function to demonstrate pystan v convergence result through R_hat table, autocorrelation (3 chians), and trace plot
        """

    #        print(az.summary(stan_fit, var_names=["v","sigma2",'W'], filter_vars="like"))
    print(az.summary(stan_fit, var_names=var_list + ['W']))
    #        az.plot_trace(stan_fit, var_names=['v','sigma2'], filter_vars="like")
    az.plot_trace(stan_fit, var_names=var_list)
    az.plot_autocorr(stan_fit, var_names=var_list)

    az.plot_pair(stan_fit, var_names=var_list, divergences=True)
Example #18
def _save_results(
    y: np.ndarray,
    mcmc: infer.MCMC,
    prior: Dict[str, jnp.ndarray],
    posterior_samples: Dict[str, jnp.ndarray],
    posterior_predictive: Dict[str, jnp.ndarray],
    *,
    var_names: Optional[List[str]] = None,
) -> None:

    root = pathlib.Path("./data/boston_pca_reg")
    root.mkdir(exist_ok=True)

    jnp.savez(root / "posterior_samples.npz", **posterior_samples)
    jnp.savez(root / "posterior_predictive.npz", **posterior_predictive)

    # Arviz
    numpyro_data = az.from_numpyro(
        mcmc,
        prior=prior,
        posterior_predictive=posterior_predictive,
    )

    az.plot_trace(numpyro_data, var_names=var_names)
    plt.savefig(root / "trace.png")
    plt.close()

    az.plot_ppc(numpyro_data)
    plt.legend(loc="upper right")
    plt.savefig(root / "ppc.png")
    plt.close()

    # Prediction
    y_pred = posterior_predictive["y"]
    y_hpdi = diagnostics.hpdi(y_pred)
    train_len = int(len(y) * 0.8)

    prop_cycle = plt.rcParams["axes.prop_cycle"]
    colors = prop_cycle.by_key()["color"]

    plt.figure(figsize=(12, 6))
    plt.plot(y, color=colors[0])
    plt.plot(y_pred.mean(axis=0), color=colors[1])
    plt.fill_between(np.arange(len(y)),
                     y_hpdi[0],
                     y_hpdi[1],
                     color=colors[1],
                     alpha=0.3)
    plt.axvline(train_len, linestyle="--", color=colors[2])
    plt.xlabel("Index [a.u.]")
    plt.ylabel("Target [a.u.]")
    plt.savefig(root / "prediction.png")
    plt.close()
def model09(path):
    # power-law degradation model without a stress term
    regular_mi_code="""
    data { 
        int<lower=0> N;
        vector<lower=0>[N] t;
        vector[N] y;
    }
    parameters {
        real<lower=0> a;
        real<lower=0> beta;
        real<lower=0> sigmasq_eta;
    }
    transformed parameters {
        real<lower=0> sigma_eta;
        sigma_eta=sqrt(sigmasq_eta);
    }
    model {
        vector[N] ypred;
        a~uniform(0,10);
        beta~uniform(0,5);
        for(i in 1:N)
            ypred[i]=a*(t[i]^beta);
        y ~ normal(ypred, sigma_eta);
    }
    """

    data=pd.read_csv(path,encoding='GBK',header=None)
    TT=data.values.tolist()[0]
    YY=data.values.tolist()[1]
    plt.scatter(TT, YY,c='blue',s=1,alpha=0.3)
    plt.title('Degradation Diagram')  # chart title
    plt.xlabel('time')  # x-axis label
    plt.ylabel('degradation data')  # y-axis label
    plt.show()

    regular_exp_data = {"N": len(YY),
                        "t": TT,
                        "y": YY}
    sm = pystan.StanModel(model_code=regular_mi_code)
    fit = sm.sampling(data=regular_exp_data,chains=4, iter=1000)
    print("fit=",fit)
    all_parameters=fit.extract(permuted=True)
    print("all_parameters=",all_parameters)
    a=all_parameters['a']
    print("a=",a) #a的结果
    b=fit.extract(permuted=False) #b是未排序的参数,用于绘图
    print("b=",b)
    arviz.plot_trace(fit)
    plt.show()
    print("model09:successful")
def get_trace(RB_model):
    # Gradient-based sampling methods
    # see also: https://docs.pymc.io/notebooks/sampler-stats.html
    # and https://docs.pymc.io/notebooks/api_quickstart.html
    with RB_model:
        trace = pm.sample(draws=2000,
                          tune=10000,
                          target_accept=0.9,
                          return_inferencedata=True)

    with RB_model:
        az.plot_trace(trace)

    return trace
Example #21
    def plot_model_quality(self, var_names=None, **kwargs):
        assert hasattr(self, 'trace'), 'Run bayesian fitting first!'
        az.plot_trace(self.trace, var_names=var_names, show=True, **kwargs)
        try:
            az.plot_pair(
                self.trace,
                var_names=var_names,
                kind="hexbin",
                # coords=coords,
                colorbar=False,
                divergences=True,
                # backend="bokeh",
            )
        except ZeroDivisionError as e:
            print(e)
Example #22
def main(argv=None):
    RANDOM_SEED = 8927
    np.random.seed(RANDOM_SEED)
    az.style.use("arviz-darkgrid")
    T = 10000
    y = np.zeros((T, ))
    # true stationarity:
    true_theta = 0.95
    # true variance of the innovation:
    true_tau = 1.0
    # true process mean:
    true_center = 0.0

    for t in range(1, T):
        y[t] = true_theta * y[t - 1] + np.random.normal(loc=true_center,
                                                        scale=true_tau)

    y = y[-5000:]
    # keep only the last 5000 values
    plt.plot(y, alpha=0.8)
    plt.xlabel("Timestep")
    plt.ylabel("$y$")
    # plt.show()
    # print(y)
    with pm.Model() as ar1:
        # assumes 95% of prob mass is between -2 and 2
        theta = pm.Normal("theta", 0.0, 1.0)
        # variance of the innovation term
        tau = pm.Exponential("tau", 0.5)
        # process mean
        center = pm.Normal("center", mu=0.0, sigma=1.0)

        likelihood = pm.AR1("y", k=theta, tau_e=tau, observed=y - center)

        trace = pm.sample(2000,
                          tune=2000,
                          init="advi+adapt_diag",
                          random_seed=RANDOM_SEED)
        idata = az.from_pymc3(trace)
        az.plot_trace(
            idata,
            lines=[
                ("theta", {}, true_theta),
                ("tau", {}, true_tau),
                ("center", {}, true_center),
            ],
        )
        plt.show()
def main():
    mu_true = 5.0
    sigma_true = 1.0
    data = np.random.normal(loc=mu_true, scale=sigma_true, size=10000)
    loglike = Loglike(data)
    #utt.verify_grad(loglike, [np.array([3.0, 2.0])])
    # verify_grad passes with no errors
    with pm.Model() as model:
        mu = pm.Normal('mu', mu=4.0, sigma=2.0, testval=4.0)
        sigma = pm.HalfNormal('sigma', sigma=5.0, testval=2.0)
        theta = tt.as_tensor_variable([mu, sigma])
        like = pm.Potential('like', loglike(theta))
    with model:
        trace = pm.sample()
        print(pm.summary(trace).to_string())

        # plot the traces
        _ = az.plot_trace(trace, lines={"mu": mu_true, "sigma": sigma_true})

        # put the chains in an array (for later!)
        samples = np.vstack((trace["mu"], trace["sigma"])).T

        # corner plot
        fig = corner.corner(samples,
                            labels=[r"$mu$", r"$\sigma$"],
                            truths=[mu_true, sigma_true])

        plt.show()
Example #24
    def plot_trace(self, trace, show=False):
        """Use `Arviz` to plot a trace of the trainable parameters,
        alongside a histogram of their distribution.

        Parameters
        ----------
        trace: numpy.ndarray
            The parameter trace with shape=(n_steps, n_trainable_parameters+1)
        show: bool
            If true, the plot will be shown.

        Returns
        -------
        matplotlib.pyplot.Figure
            The plotted figure.
        """

        trace_dict = {}

        for index, label in enumerate(self._prior_labels):
            trace_dict[label] = trace[:, index + 1]

        data = arviz.convert_to_inference_data(trace_dict)

        axes = arviz.plot_trace(data)
        figure = axes[0][0].figure

        if show:
            figure.show()

        return figure
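A hedged usage sketch for the method above, assuming a fitted object `sampler` whose `_prior_labels` holds two parameter names; column 0 of the trace is skipped because it is taken to hold the log-posterior, which is why the method indexes `trace[:, index + 1]`.

import numpy as np

# Hypothetical trace: column 0 = log-posterior, columns 1-2 = the two parameters.
trace = np.random.randn(5000, 3)

figure = sampler.plot_trace(trace, show=False)
figure.savefig("trace.png")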
Example #25
    def plot_trace(self, plotfile=None, show=False):
        """ Plot the trace of the MCMC run along with the marginal distributions of a subset of parameters

        Parameters
        ----------
        plotfile : str, optional
            Name of a file to write the plot to
        show : bool, optional, default False
            Whether to show the plot window

        """
        if not self.fitted:
            raise RuntimeError("The model must be fitted before plotting the trace")
        ax = az.plot_trace(
            self.trace,
            var_names=self.plot_trace_vars,
            #filter_vars="regex",
            compact=True,
            #lines=[
                #("mu", {}, mu),
                #("cov", {}, cov),
                #("chol_stds", {}, sigma),
                #("chol_corr", {}, rho),
            #],
        )
        if isinstance(plotfile, str):
            plt.savefig(plotfile)
        if show:
            plt.show()
Example #26
def plot_trace(trace: Dict[str, ndarray], show: bool = False) -> Figure:
    """Use `Arviz` to plot a trace of the variable parameters,
    alongside a histogram of their distribution.

    Parameters
    ----------
    trace: dict of str and numpy.ndarray
        The parameter trace with shape=(n_steps, n_variable_parameters)
    show: bool
        If true, the plot will be shown.

    Returns
    -------
    matplotlib.pyplot.Figure
        The plotted figure.
    """

    data = arviz.convert_to_inference_data(trace)

    axes = arviz.plot_trace(data)
    figure = axes[0][0].figure

    if show:
        figure.show()

    return figure
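A self-contained usage sketch for the function above; the parameter names and draw counts are illustrative, and each dictionary entry becomes one variable in the resulting InferenceData.

import numpy as np

# Two fake parameter traces from a single chain of 2000 steps.
trace = {
    "epsilon": np.random.lognormal(mean=0.0, sigma=0.25, size=2000),
    "sigma": np.random.gamma(shape=2.0, scale=0.5, size=2000),
}

figure = plot_trace(trace, show=False)
figure.savefig("parameter_trace.png")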
Example #27
def mcmc_traceplot(key, val, title=None, fpath=None, **kwargs):
    if '_' in key:
        key = ' '.join(key.split('_'))

    try:
        az.plot_trace({key: val}, **kwargs)
        fig = plt.gcf()
        if title is not None:
            fig.suptitle(title)

        if fpath is not None:
            savefig(fig, fpath)

        return fig

    except ValueError:
        return None
Example #28
def bayes_single_detector(t, s):
    with pm.Model() as abrupt_model:
        steppoint = pm.DiscreteUniform("steppoint",
                                       lower=t[1],
                                       upper=t[-1],
                                       testval=50)
        early_mu = pm.Uniform("early_mu", -50, 50)
        late_mu = pm.Uniform("late_mu", -50, 50)
        mu = pm.math.switch(steppoint >= t, early_mu, late_mu)
        sigma = pm.Normal('sigma', mu=30, sigma=20)
        s_obs = pm.Normal("s_obs", mu=mu, sigma=sigma, observed=s)

    with abrupt_model:
        trace = pm.sample(1000)
        az.plot_trace(trace)
        plt.show()
    return trace
Example #29
def efficient_trace(trace, var_names, figstem, max_panel=6):
    """
    Wrap the ArviZ trace-plot call to speed things up by splitting the variables across smaller figures, which can then be bundled into a scrollable PDF.
    """

    N = len(var_names)
    n = 0
    i = 0
    while n < N:
        print("Plotting {:} of {:}".format(n, N))
        plot_vars = var_names[n : n + max_panel]

        az.plot_trace(trace, var_names=plot_vars)
        plt.savefig(figstem.format(i))
        plt.close("all")
        n += max_panel
        i += 1
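Example invocation (the trace object, variable names, and file pattern below are placeholders): with max_panel=6, eight parameters are split across two figures, trace_00.png and trace_01.png.

# Hypothetical list of free parameters from the fitted model.
var_names = ["alpha", "beta", "sigma", "tau", "phi", "theta", "nu", "kappa"]

# Writes trace_00.png, trace_01.png, ... with at most six panels per figure.
efficient_trace(trace, var_names, "trace_{:02d}.png", max_panel=6)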
Example #30
def plot_trace(trace, title):
    """Generate a trace plot with Arviz"""
    plot_kwargs = {'color': 'b'}
    axs = az.plot_trace(trace, trace_kwargs=plot_kwargs)
    fig = plt.gcf()
    fig.suptitle(title, fontsize=16)
    # ax0, ax1 = axs[0,0], axs[0,1]
    return fig, axs
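Usage sketch for the helper above, using ArviZ's bundled example dataset as a stand-in for a real trace; trace_kwargs is forwarded to the trace line plots, so every sampled chain is drawn in blue.

import arviz as az
import matplotlib.pyplot as plt

idata = az.load_arviz_data("centered_eight")
fig, axs = plot_trace(idata, title="Centered eight-schools model")
plt.show()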