def test_arviz_to_json():
    """Smoke-test arviz_to_json against both a path target and an open file object."""
    import arviz as az

    inference_data = az.load_arviz_data("centered_eight")

    # Target given as a plain filename string.
    arviz_to_json(inference_data, "centered_eight.npz")
    check_zip("centered_eight.npz")

    # Target given as an already-open binary file descriptor.
    with open("centered_eight_as_f.npz", "wb") as handle:
        arviz_to_json(inference_data, handle)
    check_zip("centered_eight_as_f.npz")
Exemple #2
0
# Sampler configuration for the golf-putting "geometry" model.
samples=2000
chains=2 
tune=1000
# NOTE(review): `coords` is defined outside this chunk — presumably it declares
# the 'distance' dimension used below; confirm against the full script.
geometry_model=pm.Model(coords=coords)
with geometry_model:
    #to store the n-parameter of Binomial dist 
    #in the constant group of ArviZ InferenceData
    #You should always call it n for imd to retrieve it
    n = pm.Data('n', data.tries)
    # Prior on the standard deviation of the putt angle.
    sigma_angle = pm.HalfNormal('sigma_angle')
    # Success probability from putt geometry: the angle window that sinks the
    # ball, pushed through a normal CDF (`Phi`); CUP_RADIUS/BALL_RADIUS/Phi/tt
    # come from outside this chunk.
    p_goes_in = pm.Deterministic('p_goes_in', 2 * Phi(tt.arcsin((CUP_RADIUS - BALL_RADIUS) / data.distance) / sigma_angle) - 1, dims='distance')
    # Binomial likelihood over observed successes per distance bin.
    successes = pm.Binomial('successes', n=n, p=p_goes_in, observed=data.successes, dims='distance')
    #inference
    trace_g = pm.sample(draws=samples, chains=chains, tune=tune)
    prior_g= pm.sample_prior_predictive(samples=samples)
    posterior_predictive_g = pm.sample_posterior_predictive(trace_g,samples=samples)    
    
## STEP 1
# will also capture all the sampler statistics
data_g = az.from_pymc3(trace=trace_g, prior=prior_g, posterior_predictive=posterior_predictive_g)

## STEP 2
#dag    
dag_g = get_dag(geometry_model)    
# insert dag into sampler stat attributes
data_g.sample_stats.attrs["graph"] = str(dag_g)

## STEP 3    
# save data
# NOTE(review): `fileName` is defined outside this chunk — verify it is set
# before this line runs.
arviz_to_json(data_g, fileName+'.npz')
Exemple #3
0
#model-inference
# Eight-schools data, centered parameterization.
coords_c = {"school": ["A","B","C","D","E","F","G","H"]}
fileName_c="eight_schools_centered"
samples=4000
chains=2
tune=1000
with pm.Model(coords=coords_c) as centered_eight:
    # Hyperpriors for the population mean and scale.
    mu = pm.Normal('mu', mu=0, sigma=5)
    tau = pm.HalfCauchy('tau', beta=5)
    # Per-school effects (centered parameterization).
    theta = pm.Normal('theta', mu=mu, sigma=tau, dims='school')
    # NOTE(review): `sigma`, `obs`, and `SEED` are defined outside this chunk.
    y = pm.Normal('y', mu=theta, sigma=sigma, observed=obs, dims='school')
    # inference
    # BUGFIX: the next two lines were tab-indented while the rest of the block
    # uses spaces — Python 3 raises TabError on inconsistent indentation.
    # Normalized to 4-space indents.
    trace_c = pm.sample(samples, chains=chains, tune=tune, random_seed=SEED)
    prior_c = pm.sample_prior_predictive(samples=samples)
    posterior_predictive_c = pm.sample_posterior_predictive(trace_c, samples=samples)

## STEP 1
# will also capture all the sampler statistics
data_c = az.from_pymc3(trace=trace_c, prior=prior_c, posterior_predictive=posterior_predictive_c)

## STEP 2
# dag
dag_c = get_dag(centered_eight)
# insert dag into sampler stat attributes
data_c.sample_stats.attrs["graph"] = str(dag_c)

## STEP 3
# save data
arviz_to_json(data_c, fileName_c+'.npz')
Exemple #4
0
    # Interior of a `with pm.Model(...)` block whose header lies outside this
    # chunk. `mu_b`, `sigma_b`, `sigma_sigma`, `a`, `driver_idx`, `day_idx`,
    # and `reactions` are defined elsewhere — TODO confirm against full script.
    # Per-driver slope prior.
    b = pm.Normal("b", mu=mu_b, sd=sigma_b, dims="driver")
    # Per-driver observation-noise prior.
    sigma = pm.HalfNormal("sigma", sd=sigma_sigma, dims="driver")
    # Likelihood: linear trend in reaction time over days, per driver.
    y_pred = pm.Normal('y_pred',
                       mu=a[driver_idx] + b[driver_idx] * day_idx,
                       sd=sigma[driver_idx],
                       observed=reactions.Reaction,
                       dims="driver_idx_day")
    ## inference
    trace_hi = pm.sample(draws=samples, chains=chains, tune=tune)
    prior_hi = pm.sample_prior_predictive(samples=samples)
    posterior_predictive_hi = pm.sample_posterior_predictive(trace_hi,
                                                             samples=samples)

## STEP 1
## export inference results in ArviZ InferenceData obj
## will also capture all the sampler statistics
data_hi = az.from_pymc3(trace=trace_hi,
                        prior=prior_hi,
                        posterior_predictive=posterior_predictive_hi)

## STEP 2
## extract dag
dag_hi = get_dag(hierarchical_model)
## insert dag into sampler stat attributes
data_hi.sample_stats.attrs["graph"] = str(dag_hi)

## STEP 3
## save data
fileName_hi = "reaction_times_hierarchical"
arviz_to_json(data_hi, fileName_hi + '.npz')
Exemple #5
0
        trace = pm.sample(samples=samples, chains=chains)
        prior = pm.sample_prior_predictive(samples=samples)
        posterior_predictive = pm.sample_posterior_predictive(trace)

        dag = get_dag(model)

    # will also capture all the sampler statistics
    data = az.from_pymc3(trace=trace,
                         prior=prior,
                         posterior_predictive=posterior_predictive)

    # insert dag into sampler stat attributes
    data.sample_stats.attrs["graph"] = dag
    return data


if __name__ == "__main__":
    # generate a single switchpoint model
    poverty = load_data()
    model = define_model(poverty)
    # capture_inference derives the dag itself; the original's separate
    # `dag = get_dag(model)` here was unused and has been removed.
    data = capture_inference(model)
    arviz_to_json(data, "switchpoint.npz")

    # generate multiple models
    models = {
        "centered": az.load_arviz_data("centered_eight"),
        "noncentered": az.load_arviz_data("non_centered_eight"),
    }
    multi_arviz_to_json(models, "multimodel.zip")
Exemple #6
0
    # Interior of a `with pm.Model(...)` block whose header lies outside this
    # chunk (fully pooled variant: single slope/intercept shared by all
    # drivers). `a`, `day_idx`, and `reactions` are defined elsewhere — TODO
    # confirm against full script.
    # Shared slope prior.
    b = pm.Normal("b", mu=10, sd=250)
    # Shared observation-noise prior.
    sigma = pm.HalfNormal("sigma", sd=200)
    # Likelihood: one linear trend in reaction time over days for everyone.
    y_pred = pm.Normal('y_pred',
                       mu=a + b * day_idx,
                       sd=sigma,
                       observed=reactions.Reaction,
                       dims="driver_idx_day")
    ## inference
    trace_p = pm.sample(samples, chains=chains, tune=tune)
    prior_p = pm.sample_prior_predictive(samples=samples)
    posterior_predictive_p = pm.sample_posterior_predictive(trace_p,
                                                            samples=samples)

## STEP 1
## export inference results in ArviZ InferenceData obj
## will also capture all the sampler statistics
data_p = az.from_pymc3(trace=trace_p,
                       prior=prior_p,
                       posterior_predictive=posterior_predictive_p)

## STEP 2
## extract dag
dag_p = get_dag(fullyPooled_model)
## insert dag into sampler stat attributes
data_p.sample_stats.attrs["graph"] = str(dag_p)

## STEP 3
## save data
fileName_p = "reaction_times_pooled"
arviz_to_json(data_p, fileName_p + '.npz')