Example #1
import pystan
import numpy as np
import os
import time

theta = 10
n = 50
y = np.random.exponential(theta, n)
data = {'n': y.size, 'y': y}

seed = 1

include_files = ["external_manual.hpp"]
start = time.time()
sm = pystan.StanModel(file="model.stan",
                      allow_undefined=True,
                      includes=include_files,
                      include_dirs=["."],
                      verbose=True)
print("Compilation took {0:.1f} seconds".format(time.time() - start))
fit = sm.vb(data=data, iter=1000, algorithm='meanfield', seed=seed)
print(fit['mean_pars'])
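# Note: allow_undefined=True only makes the compiler accept a function that is
# declared but not defined in the Stan program; external_manual.hpp has to
# supply the C++ definition. A minimal sketch of the Stan side (the function
# name is hypothetical, not taken from model.stan):
#
#   functions {
#     real my_external(real x);  // body provided by external_manual.hpp
#   }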
Example #2
stan_data["obs_mBx1c_cov"] = np.array(
    [list(item) for item in stan_data["obs_mBx1c_cov"]], dtype=np.float64)
print(stan_data["obs_mBx1c_cov"])

stan_data["allow_alpha_S_N"] = 1
print("SKEW NORMAL!!!!" * 100)

pfl_name = subprocess.getoutput("hostname") + "_1DGP.pickle"

with open("../stan_code_1DGP.txt", 'r') as f:
    lines = f.read()

try:
    sm, sc = pickle.load(open(pfl_name, 'rb'))

    if sc != lines:
        # cached model was built from different Stan code; recompile
        raise RuntimeError("Stan code changed")
except Exception:
    sm = pystan.StanModel(file="../stan_code_1DGP.txt")
    pickle.dump([sm, lines], open(pfl_name, 'wb'))

fit = sm.sampling(data=stan_data, iter=1000, chains=4, refresh=10, init=initfn)

print(fit)

fit_params = fit.extract(permuted=True)

pickle.dump((stan_data, fit_params), open("results.pickle", 'wb'))
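# A minimal reload sketch for a later session (the file name comes from the
# dump above; nothing else is assumed):
#
#   import pickle
#   stan_data, fit_params = pickle.load(open("results.pickle", 'rb'))
#   print(list(fit_params.keys()))  # parameter names from fit.extract()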
Example #3
parameters {
    real beta0;
    real beta1;                                                
    real<lower=0> sigma;               
}
model {
    vector[nobs] mu;

    mu = beta0 + beta1 * x;

    y ~ normal(mu, sigma);             // Likelihood function
}
"""

# compile model
model = pystan.StanModel(model_code=stan_code)

# perform fit
fit = model.sampling(data=toy_data, iter=5000, chains=3, verbose=False, n_jobs=3)

# Output
nlines = 8                     # number of lines in screen output

output = str(fit).split('\n')
for item in output[:nlines]:
    print(item)   


# Plot
import matplotlib.pyplot as plt
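# A hedged sketch of the plotting step this excerpt cuts off, assuming `fit`
# is the StanFit object returned by model.sampling above:
fit.plot(pars=['beta0', 'beta1', 'sigma'])  # trace and density plots
plt.show()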
Example #4
import matplotlib.pyplot as plt
import numpy as np
from simulate_data import simulate_data
import ranking as rk

import pystan
import patsy

import pickle

try:
    gen = pickle.load(open("thurstonian_gen.pkl", 'rb'))
except FileNotFoundError:
    gen = pystan.StanModel(file="thurstonian_cov_generate.stan")
    with open('thurstonian_gen.pkl', 'wb') as f:
        pickle.dump(gen, f)

try:
    sm = pickle.load(open("thurstonian_cov.pkl", 'rb'))
except FileNotFoundError:
    sm = pystan.StanModel(file="thurstonian_cov.stan")
    with open('thurstonian_cov.pkl', 'wb') as f:
        pickle.dump(sm, f)
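# The load-or-compile pattern above repeats for every model; a small helper
# captures it (a sketch; the function name is ours, not from the original):
def load_or_compile(stan_file, pickle_file):
    try:
        with open(pickle_file, 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        sm = pystan.StanModel(file=stan_file)
        with open(pickle_file, 'wb') as f:
            pickle.dump(sm, f)
        return sm

# e.g. sm = load_or_compile("thurstonian_cov.stan", "thurstonian_cov.pkl")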
Example #5
    y_mean = x_full @ W
    y_full = y_mean + np.random.normal(0, noise_sigma, (N_train + N_test, M))
    Y = y_full[:N_train]
    Y_test = y_full[N_train:]
    model_data = {
        'N': N_train,
        'K': K,
        'y': Y[:, 0],
        'X': X,
        'sigma': noise_sigma
    }

    try:
        sm = pickle.load(open('model_linear_reg_chains192.pkl', 'rb'))
    except (FileNotFoundError, pickle.UnpicklingError):
        sm = pystan.StanModel(model_code=linear_reg_fixed_variance_code)
        with open('model_linear_reg_chains192.pkl', 'wb') as f:
            pickle.dump(sm, f)

    for n in range(N_sim):
        w_mean_n = w_mean_vi_list[n]
        num_proposal_samples = 4000
        try:
            fit_hmc  # reuse the HMC fit if a previous iteration created it
        except NameError:
            fit_hmc = sm.sampling(data=model_data, iter=2000)

        try:
            fit_vb  # reuse the VB fit if a previous iteration created it
        except NameError:
            fit_vb = sm.vb(data=model_data,
Example #6
 def test_threading_support(self):
     # Don't test on Windows
     if sys.platform.startswith("win"):
         return
     # Set up environmental variable
     os.environ['STAN_NUM_THREADS'] = "2"
     # Enable threading
     extra_compile_args = ['-pthread', '-DSTAN_THREADS']
     stan_code = """
     functions {
       vector bl_glm(vector mu_sigma, vector beta,
                     real[] x, int[] y) {
         vector[2] mu = mu_sigma[1:2];
         vector[2] sigma = mu_sigma[3:4];
         real lp = normal_lpdf(beta | mu, sigma);
         real ll = bernoulli_logit_lpmf(y | beta[1] + beta[2] * to_vector(x));
         return [lp + ll]';
       }
     }
     data {
       int<lower = 0> K;
       int<lower = 0> N;
       vector[N] x;
       int<lower = 0, upper = 1> y[N];
     }
     transformed data {
       int<lower = 0> J = N / K;
       real x_r[K, J];
       int<lower = 0, upper = 1> x_i[K, J];
       {
         int pos = 1;
         for (k in 1:K) {
           int end = pos + J - 1;
           x_r[k] = to_array_1d(x[pos:end]);
           x_i[k] = y[pos:end];
           pos += J;
         }
       }
     }
     parameters {
       vector[2] beta[K];
       vector[2] mu;
       vector<lower=0>[2] sigma;
     }
     model {
       mu ~ normal(0, 2);
       sigma ~ normal(0, 2);
       target += sum(map_rect(bl_glm, append_row(mu, sigma),
                              beta, x_r, x_i));
     }
     """
     stan_data = dict(K=4,
                      N=12,
                      x=[
                          1.204, -0.573, -1.35, -1.157, -1.29, 0.515, 1.496,
                          0.918, 0.517, 1.092, -0.485, -2.157
                      ],
                      y=[1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1])
     stan_model = pystan.StanModel(model_code=stan_code,
                                   extra_compile_args=extra_compile_args)
     fit = stan_model.sampling(data=stan_data, chains=2, n_jobs=1)
     self.assertIsNotNone(fit)
     fit2 = stan_model.sampling(data=stan_data, chains=2, n_jobs=2)
     self.assertIsNotNone(fit2)
     draw = fit.extract(pars=fit.model_pars + ['lp__'], permuted=False)
     lp = {
         key: values[-1, 0]
         for key, values in draw.items() if key == 'lp__'
     }['lp__']
     draw = {
         key: values[-1, 0]
         for key, values in draw.items() if key != 'lp__'
     }
     draw = fit.unconstrain_pars(draw)
     self.assertEqual(fit.log_prob(draw), lp)
     draw2 = fit2.extract(pars=fit2.model_pars + ['lp__'], permuted=False)
     lp2 = {
         key: values[-1, 0]
         for key, values in draw2.items() if key == 'lp__'
     }['lp__']
     draw2 = {
         key: values[-1, 0]
         for key, values in draw2.items() if key != 'lp__'
     }
     draw2 = fit2.unconstrain_pars(draw2)
     self.assertEqual(fit2.log_prob(draw2), lp2)
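# Threading sketch: map_rect only runs shards in parallel when the model is
# compiled with -pthread and -DSTAN_THREADS and STAN_NUM_THREADS is set before
# sampling, as the test above does. Outside the test it looks like this
# (values are illustrative):
#
#   os.environ['STAN_NUM_THREADS'] = "4"
#   sm = pystan.StanModel(model_code=stan_code,
#                         extra_compile_args=['-pthread', '-DSTAN_THREADS'])
#   fit = sm.sampling(data=stan_data, chains=2, n_jobs=2)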
Example #7
len_obs = len(obs_no)

print(len_obs)
print(obs_no)
print(y)

stan_data = {
    'T': T,
    'len_obs': len_obs,
    'y': y,
    'obs_no': obs_no
}

if os.path.exists('5-3-2-local-level-interpolation.pkl'):
    sm = pickle.load(open('5-3-2-local-level-interpolation.pkl', 'rb'))
else:
    # a model using priors for mu and sigma.
    sm = pystan.StanModel(file='5-3-2-local-level-interpolation.stan')

control = {
    'adapt_delta': 0.8,
    'max_treedepth': 10
}

mcmc_result = sm.sampling(
    data=stan_data,
    seed=1,
    chains=4,
    iter=2000,
    warmup=1000,
    control=control)
Example #8
def model_impact_causal(df,
                        kind='self',
                        prior_beta=1.,
                        prior_delta=1.,
                        prior_alpha=1.,
                        prior_mu_beta=1.,
                        prior_mu_alpha=1.,
                        code_only=False,
                        iters=NUM_SAMPLES,
                        adapt_delta=ADAPT_DELTA,
                        max_treedepth=M_TREEDEPTH):
    # Model: Eqs. 1 & 2, with `f=0`
    # Results: Tables 1, S2; Figure 4
    import pystan as st
    model_code = '''
                data {
                    int<lower=1> n; //number of data points
                    int<lower=1> m; //number of conditions
                    int<lower=2> k; //number of outcomes
                    int<lower=1,upper=m> x_cond[n]; //treatment group
                    int<lower=1,upper=k> y_pre[n]; //pre-exposure outcome
                    int<lower=1,upper=k> y_post[n]; //post-exposure outcome
                }
                parameters {
                    real mu_beta;
                    real beta[m];
                    vector<lower=0>[k-1] mu_delta;
                    simplex[k-1] delta[m];
                    vector[k-1] mu_alpha;
                    ordered[k-1] alpha[m];
                    ordered[k-1] alpha_pre;
                }
                model {
                    mu_beta ~ normal(0, %f);
                    mu_alpha ~ normal(0, %f);
                    mu_delta ~ exponential(%f);
                    for (i in 1:m){
                        beta[i] ~ normal(mu_beta, %f);
                        alpha[i] ~ normal(mu_alpha, %f);
                        delta[i] ~ dirichlet(mu_delta);
                    }
                    alpha_pre ~ normal(0, %f);
                    for (i in 1:n){
                        y_pre[i] ~ ordered_logistic(0, alpha_pre);
                        y_post[i] ~ ordered_logistic(beta[x_cond[i]]*sum(delta[x_cond[i]][:y_pre[i]-1]), alpha[x_cond[i]]);
                    }
                }
            ''' % (prior_mu_beta, prior_mu_alpha, prior_delta, prior_beta,
                   prior_alpha, prior_alpha)

    data = {
        'n': df.shape[0],
        'm': 2,
        'k': 4,
        'x_cond': df['Treatment'].values + 1,
        'y_pre': df['Vaccine Intent for %s (Pre)' % kind].values,
        'y_post': df['Vaccine Intent for %s (Post)' % kind].values
    }
    if code_only: return model_code
    model = st.StanModel(model_code=model_code)
    fit = model.sampling(data=data,
                         iter=iters,
                         control=dict(adapt_delta=adapt_delta,
                                      max_treedepth=max_treedepth))
    return fit
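# Hypothetical usage (a DataFrame `df` with the column names used above is
# assumed to exist, as are the module-level sampling constants):
#
#   fit = model_impact_causal(df, kind='self')
#   print(fit.stansummary(pars=['beta', 'mu_beta']))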
Example #9
def model_socdem(df,
                 dd,
                 atts=[],
                 model_name='causal',
                 group=None,
                 kind='self',
                 decrease=0,
                 prior_beta=1.,
                 prior_delta=1.,
                 prior_alpha=1.,
                 prior_mu_beta=1.,
                 prior_mu_alpha=1.,
                 code_only=False,
                 iters=NUM_SAMPLES,
                 adapt_delta=ADAPT_DELTA,
                 max_treedepth=M_TREEDEPTH):
    # Model: Eqs. 1 & 2, with `f` modeling a linear combination of socio-demographics and other covariates
    # Results: Tables S3, S4, S5, S6; Figures 5, S1, S2, S3
    import pystan as st
    import numpy as np
    from .bayesoc import Dim  #we define some helper classes to extract posterior samples easily
    cats = [
        'Age', 'Gender', 'Education', 'Employment', 'Religion', 'Political',
        'Ethnicity', 'Income'
    ]
    if isinstance(atts, str): atts = [atts]
    for att in atts:
        cats += [x for x in list(df) if x[:len(att)] == att]
    outs = [
        'Vaccine Intent for %s (Pre)' % kind,
        'Vaccine Intent for %s (Post)' % kind, 'Treatment'
    ]
    df = df[cats + outs].dropna()
    if group is not None: model_name = 'post'
    causal = int(model_name in ['causal', 'causaldiff'])
    dims = [
        Dim(pi=len(dd[cat]),
            out=causal + 1,
            prior_beta=prior_beta,
            prior_mu_beta=prior_mu_beta,
            value=dd[cat].keys(),
            name=cat) for cat in cats
    ]
    stan = [
        d.get_stan(outcome_size='m',
                   outcome_index='x_cond[i]',
                   hierarchical=True) for d in dims
    ]
    code = {'data': [], 'parameters': [], 'model': [], 'output': []}
    for key in code:
        for d in stan:
            code[key].append(d[key])
    model_code = {
        'pre':
        '''
                            data {
                                int<lower=1> n; //number of data points
                                int<lower=2> k; //number of outcomes
                                int<lower=1,upper=k> y_pre[n]; //pre-exposure
                                %s
                            }
                            parameters {
                                %s
                                ordered[k-1] alpha;
                            }
                            model {
                                %s
                                alpha ~ normal(0, %f);
                                for (i in 1:n)
                                    y_pre[i] ~ ordered_logistic(%s, alpha);
                            }
                        ''' %
        ('\n'.join(code['data']), '\n'.join(code['parameters']), '\n'.join(
            code['model']), prior_alpha, ' + '.join(code['output'])),
        'post':
        '''
                            data {
                                int<lower=1> n; //number of data points
                                int<lower=2> k; //number of outcomes
                                int<lower=1,upper=k> y_pre[n]; //pre-exposure
                                int<lower=1,upper=k> y_post[n]; //post-exposure
                                %s
                            }
                            parameters {
                                %s
                                real beta;
                                simplex[k-1-%i] delta;
                                ordered[k-1] alpha;
                            }
                            model {
                                %s
                                beta ~ normal(0, %f);
                                {
                                    vector[k-1-%i] u;
                                    for (i in 1:(k-1-%i))
                                        u[i] = 1;
                                    delta ~ dirichlet(%f*u);
                                }
                                alpha ~ normal(0, %f);
                                for (i in 1:n)
                                    y_post[i] ~ ordered_logistic(beta*sum(delta[:y_pre[i]-1-%i]) + %s, alpha);
                            }
                        ''' %
        ('\n'.join(code['data']), '\n'.join(code['parameters']), decrease,
         '\n'.join(code['model']), prior_beta, decrease, decrease, prior_delta,
         prior_alpha, decrease, ' + '.join(code['output'])),
        'causal':
        '''
                            data {
                                int<lower=1> n; //number of data points
                                int<lower=1> m; //number of conditions
                                int<lower=2> k; //number of outcomes
                                int<lower=1,upper=m> x_cond[n]; //treatment group
                                int<lower=1,upper=k> y_pre[n]; //pre-exposure outcome
                                int<lower=1,upper=k> y_post[n]; //post-exposure outcome
                                %s
                            }
                            parameters {
                                %s
                                real mu_beta;
                                real beta[m];
                                vector<lower=0>[k-1-%i] mu_delta;
                                simplex[k-1-%i] delta[m];
                                vector[k-1] mu_alpha;
                                ordered[k-1] alpha[m];
                            }
                            model {
                                %s
                                mu_beta ~ normal(0, %f);
                                mu_alpha ~ normal(0, %f);
                                mu_delta ~ exponential(%f);
                                for (i in 1:m){
                                    beta[i] ~ normal(mu_beta, %f);
                                    alpha[i] ~ normal(mu_alpha, %f);
                                    delta[i] ~ dirichlet(mu_delta);
                                }
                                for (i in 1:n)
                                    y_post[i] ~ ordered_logistic(beta[x_cond[i]]*sum(delta[x_cond[i]][:y_pre[i]-1-%i]) + %s, alpha[x_cond[i]]);
                            }
                        ''' %
        ('\n'.join(code['data']), '\n'.join(code['parameters']), decrease,
         decrease, '\n'.join(code['model']), prior_mu_beta, prior_mu_alpha,
         prior_delta, prior_beta, prior_alpha, decrease, ' + '.join(
             code['output'])),
        'causaldiff':
        '''
                            data {
                                int<lower=1> n; //number of data points
                                int<lower=1> m; //number of conditions
                                int<lower=2> k; //number of outcomes
                                int<lower=1,upper=m> x_cond[n]; //treatment group
                                int<lower=1,upper=k> y_diff[n]; //post-pre difference in outcomes
                                %s
                            }
                            parameters {
                                %s
                                vector[k-1] mu_alpha;
                                ordered[k-1] alpha[m];
                            }
                            model {
                                %s
                                mu_alpha ~ normal(0, %f); 
                                for (i in 1:m)
                                    alpha[i] ~ normal(mu_alpha, %f);
                                for (i in 1:n)
                                    y_diff[i] ~ ordered_logistic(%s, alpha[x_cond[i]]);
                            }
                        ''' % ('\n'.join(code['data']), '\n'.join(
            code['parameters']), '\n'.join(code['model']), prior_mu_alpha,
                               prior_alpha, ' + '.join(code['output']))
    }
    data = {}
    if causal:
        data['m'] = 2
        data['x_cond'] = df['Treatment'].values + 1
        if model_name == 'causal':
            data['k'] = 4
            data['y_pre'] = df['Vaccine Intent for %s (Pre)' % kind].values
            data['y_post'] = df['Vaccine Intent for %s (Post)' % kind].values
        else:
            data['k'] = 3
            tmp = df['Vaccine Intent for %s (Post)' %
                     kind].values - df['Vaccine Intent for %s (Pre)' %
                                       kind].values
            tmp[tmp > 0] = 1
            tmp[tmp < 0] = -1
            data['y_diff'] = tmp + 2
    elif model_name == 'post':
        data['k'] = 4
        df = df.loc[df['Treatment'] == group]
        data['y_pre'] = df['Vaccine Intent for %s (Pre)' % kind].values
        data['y_post'] = df['Vaccine Intent for %s (Post)' % kind].values
    else:
        data['k'] = 4 - decrease
        data['y_pre'] = df['Vaccine Intent for %s (Pre)' %
                           kind].values - decrease
    data['n'] = df.shape[0]
    print('Dataframe of size:', df.shape)
    for i in range(len(cats)):
        name = dims[i].name
        data['k_%s' % name] = len(dd[cats[i]])
        data[name] = np.array(df[cats[i]].values, dtype=int)
        if data[name].min() == 0: data[name] += 1
    if code_only: return model_code[model_name]
    model = st.StanModel(model_code=model_code[model_name])
    fit = model.sampling(data=data,
                         iter=iters,
                         control=dict(adapt_delta=adapt_delta,
                                      max_treedepth=max_treedepth))
    return fit
Example #10
def model_image_impact(df,
                       group=1,
                       kind='self',
                       prior_beta=1.,
                       prior_delta=1.,
                       prior_gamma=1.,
                       prior_alpha=1.,
                       code_only=False,
                       iters=NUM_SAMPLES,
                       adapt_delta=ADAPT_DELTA,
                       max_treedepth=M_TREEDEPTH):
    # Model: Eq. 7
    # Results: Table S7
    import pystan as st
    import numpy as np
    model_code = '''
                    data {
                        int<lower=1> n; //number of data points
                        int<lower=1> p; //number of images
                        int<lower=1> m; //number of metrics
                        int<lower=2> k; //number of outcomes
                        int<lower=1,upper=k> y_pre[n]; //pre-exposure
                        int<lower=1,upper=k> y_post[n]; //post-exposure
                        matrix[p,m] x_img[n]; //image metrics
                    }
                    parameters {
                        real beta;
                        vector[m] beta_img;
                        simplex[p] gamma;
                        simplex[k-1] delta;
                        ordered[k-1] alpha;
                    }
                    model {
                        beta ~ normal(0, %f);
                        beta_img ~ normal(0, %f);
                        {
                            vector[p] u_img;
                            for (i in 1:p)
                                u_img[i] = 1;
                            gamma ~ dirichlet(%f*u_img);
                        }
                        {
                            vector[k-1] u;
                            for (i in 1:(k-1))
                                u[i] = 1;
                            delta ~ dirichlet(%f*u);
                        }
                        alpha ~ normal(0, %f);
                        for (i in 1:n)
                            y_post[i] ~ ordered_logistic(beta*sum(delta[:y_pre[i]-1]) + to_row_vector(gamma)*x_img[i]*beta_img, alpha);
                    }
                ''' % (prior_beta, prior_beta, prior_gamma, prior_delta,
                       prior_alpha)

    metrics = ['Vaccine Intent', 'Agreement', 'Trust', 'Fact-check', 'Share']
    df = df.loc[df['Treatment'] == group]
    x = np.dstack([
        df[['Image %i:%s' % (i + 1, m) for i in range(5)]].values
        for m in metrics
    ])
    data = {
        'n': df.shape[0],
        'p': 5,
        'm': len(metrics),
        'k': 4,
        'x_img': x,
        'y_pre': df['Vaccine Intent for %s (Pre)' % kind].values,
        'y_post': df['Vaccine Intent for %s (Post)' % kind].values
    }
    if code_only: return model_code
    model = st.StanModel(model_code=model_code)
    fit = model.sampling(data=data,
                         iter=iters,
                         control=dict(adapt_delta=adapt_delta,
                                      max_treedepth=max_treedepth))
    return fit
Example #11
def model_similar_content(df,
                          model_name='seen',
                          kind='self',
                          prior_beta=1.,
                          prior_alpha=1.,
                          prior_delta=1.,
                          prior_mu_beta=1.,
                          prior_mu_alpha=1.,
                          code_only=False,
                          iters=NUM_SAMPLES,
                          adapt_delta=ADAPT_DELTA,
                          max_treedepth=M_TREEDEPTH):
    import pystan as st
    import numpy as np
    model_code = {
        'pre':
        '''
                            data {
                                int<lower=1> n; //number of data points
                                int<lower=1> m; //number of conditions
                                int<lower=2> k; //number of outcomes
                                int<lower=1,upper=m> x_cond[n]; //treatment group
                                int<lower=0,upper=1> x_seen[n]; //seen images
                                int<lower=1,upper=k> y_pre[n]; //pre-exposure outcome
                            }
                            parameters {
                                real mu_beta;
                                real beta[m];
                                vector[k-1] mu_alpha;
                                ordered[k-1] alpha[m];
                                real<lower=0,upper=1> theta[m];
                            }
                            model {
                                mu_beta ~ normal(0, %f);
                                mu_alpha ~ normal(0, %f);
                                for (i in 1:m){
                                    beta[i] ~ normal(mu_beta, %f);
                                    alpha[i] ~ normal(mu_alpha, %f);
                                }
                                theta ~ beta(1, 1);
                                for (i in 1:n){
                                    x_seen[i] ~ bernoulli(theta[x_cond[i]]);
                                    y_pre[i] ~ ordered_logistic(beta[x_cond[i]]*x_seen[i], alpha[x_cond[i]]);
                                }
                            }
                        ''' %
        (prior_mu_beta, prior_mu_alpha, prior_beta, prior_alpha),
        'causal':
        '''
                            data {
                                int<lower=1> n; //number of data points
                                int<lower=1> m; //number of conditions
                                int<lower=2> k; //number of outcomes
                                int<lower=1,upper=m> x_cond[n]; //treatment group
                                int<lower=0,upper=1> x_seen[n]; //seen images
                                int<lower=1,upper=k> y_pre[n]; //pre-exposure outcome
                                int<lower=1,upper=k> y_post[n]; //post-exposure outcome
                            }
                            parameters {
                                real mu_beta;
                                real beta[m];
                                real mu_beta_pre;
                                real beta_pre[m];
                                real mu_beta_post;
                                real beta_post[m];
                                vector<lower=0>[k-1] mu_delta;
                                simplex[k-1] delta[m];
                                vector[k-1] mu_alpha_pre;
                                ordered[k-1] alpha_pre[m];
                                vector[k-1] mu_alpha_post;
                                ordered[k-1] alpha_post[m];
                                real<lower=0,upper=1> theta[m];
                            }
                            model {
                                mu_beta ~ normal(0, %f);
                                mu_beta_pre ~ normal(0, %f);
                                mu_beta_post ~ normal(0, %f);
                                mu_alpha_pre ~ normal(0, %f);
                                mu_alpha_post ~ normal(0, %f);
                                mu_delta ~ exponential(%f);
                                for (i in 1:m){
                                    beta[i] ~ normal(mu_beta, %f);
                                    beta_pre[i] ~ normal(mu_beta_pre, %f);
                                    beta_post[i] ~ normal(mu_beta_post, %f);
                                    alpha_pre[i] ~ normal(mu_alpha_pre, %f);
                                    alpha_post[i] ~ normal(mu_alpha_post, %f);
                                    delta[i] ~ dirichlet(mu_delta);
                                }
                                theta ~ beta(1, 1);
                                for (i in 1:n){
                                    x_seen[i] ~ bernoulli(theta[x_cond[i]]);
                                    y_pre[i] ~ ordered_logistic(beta_pre[x_cond[i]]*x_seen[i], alpha_pre[x_cond[i]]);
                                    y_post[i] ~ ordered_logistic(beta[x_cond[i]]*sum(delta[x_cond[i]][:y_pre[i]-1])+beta_post[x_cond[i]]*x_seen[i], alpha_post[x_cond[i]]);
                                }
                            }
                        ''' %
        (prior_mu_beta, prior_mu_beta, prior_mu_beta, prior_mu_alpha,
         prior_mu_alpha, prior_delta, prior_beta, prior_beta, prior_beta,
         prior_alpha, prior_alpha),
        'seen_ordinal':
        '''
                            data {
                                int<lower=1> n; //number of data points
                                int<lower=1> m; //number of conditions
                                int<lower=2> k; //number of outcomes
                                int<lower=1,upper=m> x_cond[n]; //treatment group
                                int<lower=0,upper=1> x_seen[n]; //seen images
                                int<lower=1,upper=k> y_pre[n]; //pre-exposure outcome
                            }
                            parameters {
                                real mu_beta;
                                real beta[m];
                                vector<lower=0>[k-1] mu_delta;
                                simplex[k-1] delta[m];
                                real mu_alpha;
                                real alpha[m];
                            }
                            model {
                                mu_beta ~ normal(0, %f);
                                mu_alpha ~ normal(0, %f);
                                mu_delta ~ exponential(%f);
                                for (i in 1:m){
                                    beta[i] ~ normal(mu_beta, %f);
                                    alpha[i] ~ normal(mu_alpha, %f);
                                    delta[i] ~ dirichlet(mu_delta);
                                }
                                for (i in 1:n)
                                    x_seen[i] ~ bernoulli_logit(beta[x_cond[i]]*sum(delta[x_cond[i]][:y_pre[i]-1]) + alpha[x_cond[i]]);
                            }
                        ''' %
        (prior_mu_beta, prior_mu_alpha, prior_delta, prior_beta, prior_alpha),
        'seen':
        '''
                            data {
                                int<lower=1> n; //number of data points
                                int<lower=1> m; //number of conditions
                                int<lower=2> k; //number of outcomes
                                int<lower=1,upper=m> x_cond[n]; //treatment group
                                int<lower=0,upper=1> x_seen[n]; //seen images
                                int<lower=1,upper=k> y_pre[n]; //pre-exposure outcome
                            }
                            parameters {
                                vector[k] mu_beta;
                                vector[k] beta[m];
                            }
                            model {
                                mu_beta ~ normal(0, %f);
                                for (i in 1:m)
                                    beta[i] ~ normal(mu_beta, %f);
                                for (i in 1:n)
                                    x_seen[i] ~ bernoulli_logit(beta[x_cond[i]][y_pre[i]]);
                            }
                        ''' % (prior_mu_beta, prior_beta)
    }

    df = df.loc[df['Seen such online content'] != 3]  # ignore "don't know" responses
    data = {
        'n': df.shape[0],
        'm': 2,
        'k': 4,
        'x_cond': df['Treatment'].values + 1,
        'y_pre': df['Vaccine Intent for %s (Pre)' % kind].values,
        'x_seen': [i % 2 for i in df['Seen such online content'].values]
    }  #"yes":1, "no":0
    if model_name == 'causal':
        data['y_post'] = df['Vaccine Intent for %s (Post)' % kind].values
    if code_only: return model_code
    model = st.StanModel(model_code=model_code[model_name])
    fit = model.sampling(data=data,
                         iter=iters,
                         control=dict(adapt_delta=adapt_delta,
                                      max_treedepth=max_treedepth))
    return fit
Example #12
 def time_stan():
     return pystan.StanModel(model_code=model, verbose=True)
Example #13
model_rt_mins[...] = 0.41  # Replace subject C12, C26, and C31's implausibly low minimum RTs with the lowest RT from all other subjects' data
model_rt_mins[14] = 0.41
model_rt_mins[20] = 0.41

model_data = {
    'NS': 30,
    'NP': 20,
    'NT': 30,
    'correct': model_data_correct,
    'feedback': model_data_feedback,
    'bid_congruence': model_data_bid_congruence,
    'rt': model_data_rt,
    'rt_mins': model_rt_mins,
    'item_popularity': model_data_item_popularity
}

model_code_obj = pystan.StanModel(
    file='model_dual_insight_bayesian_noeye_non_social.stan.cpp',
    model_name='model_dual_insight_bayesian_noeye_non_social'
)  # Specific to model
fit = model_code_obj.sampling(data=model_data, iter=2000, chains=4, refresh=10)

with open('pickles/model_dual_insight_bayesian_noeye_non_social.pkl',
          'wb') as f:  # Specific to model
    pickle.dump(model_code_obj, f)

with open('pickles/fit_dual_insight_bayesian_noeye_non_social.pkl',
          'wb') as f:  # Specific to model
    pickle.dump(fit, f)

print(fit)
Example #14
####################################################
def save(obj, filename):
    """Save compiled models for reuse."""
    import pickle
    with open(filename, 'wb') as f:  # pickle files must be opened in binary mode
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)

def load(filename):
    """Reload compiled models for reuse."""
    import pickle
    with open(filename, 'rb') as f:
        return pickle.load(f)


####################################################

model = pystan.StanModel(model_code=pyfitfull)
save(model, 'pyfitfull_model')

new_model = load('pyfitfull_model')




####################################################
## part six
## fit stan model with saved pyfitfull_model
####################################################

print("\n fit pystan model")

## fit=pystan.stan(model_code="pystancode.stan", data=Mtable, iter=1000, chains=4)
Example #15
 def setUpClass(cls):
     model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
     cls.model = pystan.StanModel(model_code=model_code,
                                  model_name="normal1",
                                  verbose=True,
                                  obfuscate_model_name=False)
Example #16
def main(argv):
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")

    x, y = LoadCloud(FLAGS.cloud_path)

    code = """
  functions {
  matrix cov_exp_quad_ARD(vector[] x,
                          real alpha,
                          vector rho,
                          real delta) {
      int N = size(x);
      matrix[N, N] K;
      real neg_half = -0.5;
      real sq_alpha = square(alpha);
      for (i in 1:(N-1)) {
        K[i, i] = sq_alpha + delta;
        for (j in (i + 1):N) {
          real v = sq_alpha * exp(neg_half *
                                  dot_self((x[i] - x[j]) ./ rho));
          K[i, j] = v;
          K[j, i] = v;
        }
      }
      K[N, N] = sq_alpha + delta;
      return K;
    }
  }
  data {
    int<lower=1> N;
    int<lower=1> D;
    vector[D] x[N];
    vector[N] y;
  }
  transformed data {
    real delta = 1e-9;
  }
  parameters {
    vector<lower=0>[D] rho;
    real<lower=0> alpha;
    real<lower=0> sigma;
  }
  model {
    matrix[N, N] cov = cov_exp_quad_ARD(x, alpha, rho, delta)
      + diag_matrix(rep_vector(square(sigma), N));
    matrix[N, N] L_cov = cholesky_decompose(cov);

    rho ~ inv_gamma(5, 5);
    alpha ~ normal(0, 1);
    sigma ~ normal(0, 1);

    y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);
  }
  """

    data = {
        "N": x.shape[0],
        "D": x.shape[1],
        "x": x,
        "y": y,
    }

    filename = "/tmp/stan_model_%s" % hashlib.md5(
        code.encode("ascii")).hexdigest()
    print(filename)
    try:
        sm = pickle.load(open(filename, "rb"))
    except FileNotFoundError:
        sm = pystan.StanModel(model_code=code)
        with open(filename, "wb") as f:
            pickle.dump(sm, f)
    fit = sm.sampling(data=data, iter=100000, chains=12)

    print(fit)

    params = fit.extract(["rho", "alpha", "sigma"])

    params = np.concatenate([
        params["rho"],
        params["alpha"][Ellipsis, np.newaxis],
        params["sigma"][Ellipsis, np.newaxis],
    ], -1)

    mean = params.mean(0)
    square = (params**2.).mean(0)

    print(params.shape)
    print(mean.shape)

    SaveJSON({"mean": mean, "square": square}, "/tmp/gp_reg_0")
Example #17
if _model == 'GTR':
    data['frequencies_alpha'] = [1, 1, 1, 1]
    data['rates_alpha'] = [1, 1, 1, 1, 1, 1]
elif _model == 'HKY':
    data['frequencies_alpha'] = [1, 1, 1, 1]

# Samples output file
sample_path = f'{stem}{epi}'
tree_path = f'{sample_path}.trees'

binary = _script.replace('.stan', '.pkl')
if binary == _script:
    binary = _script + '.pkl'
if not os.path.lexists(binary) or _compile:
    sm = pystan.StanModel(file=_script)
    with open(binary, 'wb') as f:
        pickle.dump(sm, f)
else:
    sm = pickle.load(open(binary, 'rb'))

if _algorithm == 'vb':
    stan_args = {}
    stan_args['output_samples'] = _samples
    if _eta:
        stan_args['eta'] = _eta
        stan_args['adapt_engaged'] = False
    if _seed:
        stan_args['seed'] = _seed

    fit = sm.vb(data=data,
Example #18
import os
import pickle
import pystan
"""
Script that compiles models in advance in order to save time during execution
"""

dir_name = os.getcwd() + '/stan_models/'

sm = pystan.StanModel(file=dir_name + 'usage_exp.stan', verbose=True)
with open(dir_name + 'usage_exp_model.pkl', 'wb') as f:
    pickle.dump(sm, f)

# sm = pystan.StanModel(file=dir_name + 'usage_exp_dummy.stan')
# with open(dir_name + 'usage_exp_dummy_model.pkl', 'wb') as f:
#     pickle.dump(sm, f)

# sm = pystan.StanModel(file=dir_name + 'usage_exp_dummy_markov.stan')
# with open(dir_name + 'usage_exp_dummy_markov_model.pkl', 'wb') as f:
#     pickle.dump(sm, f)
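# Later scripts can then load the precompiled model instead of recompiling;
# a sketch (the pickle name matches the one written above, `data` is assumed):
#
#   with open(dir_name + 'usage_exp_model.pkl', 'rb') as f:
#       sm = pickle.load(f)
#   fit = sm.sampling(data=data, iter=2000, chains=4)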
Example #19
model {
  // Priors.
  alpha ~ lognormal(m_alpha, s_alpha);
  rho ~ lognormal(m_rho, s_rho);
  eta ~ std_normal();
  beta ~ std_normal();
 
  // Model.
  y ~ bernoulli_logit(beta + f);
}
"""


# Compile model. This takes about a minute.
sm = pystan.StanModel(model_code=model_code)


# Make data.
X, y = make_moons(n_samples=50, shuffle=True, noise=0.1, random_state=1)

# Generate stan data dictionary.
stan_data = create_stan_data(X, y)

# Fit via ADVI.
vb_fit = sm.vb(data=stan_data, iter=1000, seed=1,
               grad_samples=1, elbo_samples=1)

# Fit via HMC.
# - stepsize = 0.05
# - num leapfrog steps = 20
Example #20
 def fit(self, graph):
     if not nx.is_directed_acyclic_graph(graph):
         raise ValueError(
             "Input graph must be a directed acyclic graph (DAG)")
     all_dat = {}
     all_init = {}
     for i in graph.nodes:
         node = graph.nodes[i]
         parents = list(graph.predecessors(i))
         if node.get("prophet") is None:
             raise ValueError(
                 "All nodes must have an attribute called 'prophet' contained a prophet object"
             )
         m = graph.nodes[i]["prophet"]
         if node.get("df") is None:
             raise ValueError(
                 "All nodes must have an attribute called 'df' containing a pandas DataFrame"
             )
         if node.get("future") is None:
             raise ValueError(
                 "All nodes must have an attribute called 'future' containing a pandas DataFrame"
             )
         df = graph.nodes[i]["df"]
         future = graph.nodes[i]["future"]
         future = m.setup_dataframe(future.copy())
         only_future = future[df.shape[0]:]
         history = df[df['y'].notnull()].copy()
         m.history_dates = pd.to_datetime(
             pd.Series(df['ds'].unique(), name='ds')).sort_values()
         history = m.setup_dataframe(history, initialize_scales=True)
         m.history = history
         m.set_auto_seasonalities()
         seasonal_features, prior_scales, component_cols, modes = (
             m.make_all_seasonality_features(history))
         m.train_component_cols = component_cols
         m.component_modes = modes
         m.set_changepoints()
         trend_indicator = {'linear': 0, 'logistic': 1, 'flat': 2}
         seasonal_features_future, _, _, _ = (
             m.make_all_seasonality_features(future))
         dat = {
             'T': m.history.shape[0],
             'T_pred': only_future.shape[0],
             't_pred': np.array(only_future.t),
             f'K_{i}': seasonal_features.shape[1],
             f'S_{i}': len(m.changepoints_t),
             f'y_{i}': m.history['y_scaled'],
             't': m.history['t'],
             f't_change_{i}': m.changepoints_t,
             f'X_{i}': seasonal_features,
             f'sigmas_{i}': prior_scales,
             f'tau_{i}': m.changepoint_prior_scale,
             f'trend_indicator_{i}': trend_indicator[m.growth],
             f's_a_{i}': component_cols['additive_terms'],
             f's_m_{i}': component_cols['multiplicative_terms'],
             f'a_{i}': np.array([1, 1, 1, 1]),
             f'm_{i}': np.array([0, 0, 0, 0]),
             f'X_pred_{i}': seasonal_features_future[df.shape[0]:],
             f'S_pred_{i}': 3
         }
         if m.growth == 'linear':
             dat[f'cap_{i}'] = np.zeros(m.history.shape[0])
             dat[f'cap_pred_{i}'] = np.zeros(only_future.shape[0])
             kinit = m.linear_growth_init(history)
         elif m.growth == 'flat':
             dat[f'cap_{i}'] = np.zeros(m.history.shape[0])
             dat[f'cap_pred_{i}'] = np.zeros(only_future.shape[0])
             kinit = m.flat_growth_init(history)
         else:
             dat[f'cap_{i}'] = history['cap_scaled']
             dat[f'cap_pred_{i}'] = only_future['cap_scaled']
             kinit = m.logistic_growth_init(history)
         stan_init = {
             f'k_{i}': kinit[0],
             f'offset_{i}': kinit[1],
             f'delta_{i}': np.zeros(len(m.changepoints_t)),
             f'beta_{i}':
             np.zeros(seasonal_features.shape[1] + len(parents)),
             f'sigma_obs_{i}': 1,
         }
         all_dat.update(dat)
         all_init.update(stan_init)
     all_dat['n_samp'] = self.n_samp
     self.dat = all_dat
     model_code = self.generate_stan_code(graph)
     model = pystan.StanModel(model_code=model_code)
      fit = model.optimizing(data=all_dat, init=lambda: all_init, iter=10000)
      # Loop through nodes again to put forecast results back
     for i in graph.nodes:
         m = graph.nodes[i]["prophet"]
         scale = m.y_scale
         graph.nodes[i]["y_samples"] = fit[f"y_pred_{i}"] * scale
         graph.nodes[i]["y_hat"] = fit[f"y_hat_{i}"] * scale
      return graph
Example #21
         'gamma02': 4./5,\
         'gamma03': 3./5,\
         'gamma04': 2./5,\
         'gamma05': 1./5,\
         'mag_int_raw': mag_renorm, \
         'L_Omega': numpy.identity(5), \
         'Delta_unit':R_simplex, \
         'Delta_scale': 15./4, \
         'k_unit': R_simplex, \
         'R_unit': R_simplex, \
         'rho11': -17./5,\
         'rho12': 0.*3./5,\
         'rho13': 0./5,\
         'rho14': 0.*3./5,\
         'rho15': 0.*3./5,\
         } \
        for _ in range(8)]

sm = pystan.StanModel(file='gerard11.stan')
control = {'stepsize': 1}
fit = sm.sampling(data=data,
                  iter=5000,
                  chains=8,
                  control=control,
                  init=init,
                  thin=1)

output = open('temp11.pkl', 'wb')
pickle.dump((fit.extract(), fit.get_sampler_params()), output, protocol=2)
output.close()
print(fit)
Example #22
def run_fir_hmc(data_path, input_order, prior='tc', hot_start=False):
    """Run HMC inference for an FIR model.

    input_order gives the terms b_0 * u_k + b_1 * u_{k-1} + ... + b_{input_order-1} * u_{k-input_order+1}.
    prior can be 'tc' for the tuned/correlated kernel or 'hs' for the horseshoe sparseness prior.
    hot_start will use least squares values as the starting point.
    """
    data = loadmat(data_path)

    y_est = data['y_estimation'].flatten()
    u_est = data['u_estimation'].flatten()
    y_val = data['y_validation'].flatten()
    u_val = data['u_validation'].flatten()


    # build regression matrix
    est_input_matrix = build_input_matrix(u_est, input_order)
    val_input_matrix = build_input_matrix(u_val, input_order)

    # trim measurement vectors to suit regression matrix
    max_delay = (input_order-1)
    y_est = y_est[int(max_delay):]
    y_val = y_val[int(max_delay):]

    # calculate an initial guess using least squares (ML)
    if hot_start:
        Ainv = np.linalg.pinv(est_input_matrix)
        b_init = np.matmul(Ainv, y_est)
    else:
        b_init = np.zeros((input_order))

    # Run Stan
    def init_function():
        sig_e = data['sig_e'].flatten()
        output = dict(b_coefs=b_init * np.random.uniform(0.8, 1.2, len(b_init)),
                      sig_e=(sig_e * np.random.uniform(0.8, 1.2))[0],
                      b_coefs_hyperprior=np.abs(np.random.standard_cauchy(len(b_init))),
                      shrinkage_param=np.abs(np.random.standard_cauchy(1))[0]
                      )
        return output

    stan_data = {'input_order': int(input_order),
                 'no_obs_est': len(y_est),
                 'no_obs_val': len(y_val),
                 'y_est': y_est,
                 'est_input_matrix': est_input_matrix,
                 'val_input_matrix': val_input_matrix
                 }

    # specify model file
    if prior == 'hs':
        model_path = 'stan/fir_hs.pkl'
        if Path(model_path).is_file():
            model = pickle.load(open(model_path, 'rb'))
        else:
            model = pystan.StanModel(file='stan/fir.stan')
            with open(model_path, 'wb') as file:
                pickle.dump(model, file)
    elif prior == 'tc':
        model_path = 'stan/fir_tc.pkl'
        if Path(model_path).is_file():
            model = pickle.load(open(model_path, 'rb'))
        else:
            model = pystan.StanModel(file='stan/fir_tc.stan')
            with open(model_path, 'wb') as file:
                pickle.dump(model, file)
    else:
        raise ValueError("invalid prior, options are 'hs' or 'tc'")

    fit = model.sampling(data=stan_data, init=init_function, iter=6000, chains=4)

    traces = fit.extract()

    return (fit, traces)
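# Hypothetical call (the .mat path is ours; the file must contain the
# y_estimation/u_estimation/y_validation/u_validation/sig_e arrays used above):
#
#   fit, traces = run_fir_hmc('data/example_fir.mat', input_order=10,
#                             prior='tc', hot_start=True)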
Example #23
  real b;
}

model {
  for (n in 1:N){
    Y[n] ~ bernoulli_logit(a * X[n] + b);
  }
}

"""

# +
# PyStan needs to compile the model first (it is compiled as C++)
# This takes quite a while

sm = pystan.StanModel(model_code=stan_model)

# +
# Pass the data for the data block as a dictionary

stan_data = {"N":df.shape[0], "X":df["log10 C"], "Y":df["death"]}

# +
# Sampling via MCMC

fit = sm.sampling(data=stan_data, iter=2000, warmup=500, chains=3, seed=123)

# +
# Extract the results
# Posterior means and standard errors are shown for each parameter
# Rhat close to 1 (conventionally Rhat <= 1.1) indicates good convergence
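# +
# A minimal sketch of the extraction step these comments describe, assuming
# `fit` is the fit object from above and `a`, `b` are the model's parameters:
print(fit)                       # summary table incl. means, sd, and Rhat
ms = fit.extract(permuted=True)  # posterior draws as a dict of arrays
print(ms['a'].mean(), ms['b'].mean())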
Example #24
shock_data['replicate_number'] = 0
data = pd.concat([mlg910, shock_data], ignore_index=True)
data.dropna(axis=1, inplace=True)

# Insert the shock rate identifier.
data.loc[:, 'idx'] = 0

# Rescale the intensities to meaningful values.
max_exp = data['exposure_ms'].max()
data['scaled_intensity'] = (data['intensity'] -
                            data['mean_bg']) * max_exp / data['exposure_ms']

data = data[data['scaled_intensity'] >= 0].copy()

# Load the stan model.
model = pystan.StanModel('../stan/complete_analysis.stan')

# Assemble the data dictionary.
data_dict = dict(
    J1=6,
    J2=1,
    N1=len(data[data['rbs'] == 'mlg910']),
    N2=len(data[data['rbs'] != 'mlg910']),
    repl=data[data['replicate_number'] > 0]['replicate_number'].values.astype(
        int),
    shock=np.ones(len(data[data['rbs'] != 'mlg910'])).astype(int),
    A=data[data['rbs'] == 'mlg910']['area'],
    I_A_sc=data[data['rbs'] == 'mlg910']['scaled_intensity'],
    I_A_sr=data[data['rbs'] != 'mlg910']['scaled_intensity'],
    ntot=CHANNEL_NUM,
    sigma_ntot=CHANNEL_SEM,
Example #25
sales_df_5['date'] = sales_df_5['date'].view('int64') // 10**9
print(sales_df_5.head())

sales = sales_df_5['sales']
T = len(sales)

stan_data = {'T': T, 'y': sales}

filename = '5-7-2-autoregressive'

if os.path.exists('%s.pkl' % filename):
    sm = pickle.load(open('%s.pkl' % filename, 'rb'))
    # sm = pystan.StanModel(file='5-4-1-simple-reg.stan')
else:
    # a model using prior for mu and sigma.
    sm = pystan.StanModel(file='%s.stan' % filename)

control = {'adapt_delta': 0.8, 'max_treedepth': 16}

mcmc_result = sm.sampling(data=stan_data,
                          seed=1,
                          chains=4,
                          iter=8000,
                          warmup=2000,
                          control=control,
                          thin=6)

print(mcmc_result)
mcmc_result.plot()
plt.show()
Example #26
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
import pystan
import torch
import torch.distributions as dist

model_name = 'mix'

# Compile stan model, if needed. Otherwise, load model.
if os.path.exists('{}.pickle'.format(model_name)):
    # Load model if it is cached.
    sm = pickle.load(open('{}.pickle'.format(model_name), 'rb'))
else:
    # compile model
    sm = pystan.StanModel(file='{}.stan'.format(model_name))
    # save model for later use.
    with open('{}.pickle'.format(model_name), 'wb') as f:
        pickle.dump(sm, f)

# Set random seed
np.random.seed(0)

# Simulate data
N = 500
mu_true = 2
sig_true = .3
y = np.random.randn(N) * sig_true + mu_true

# Fit STAN model (NUTS)
Kmcmc = 3
Example #27
#              # 'z0':mu0.flatten(),
#              }

################ Coupled version
# state initialisation point
z_init = np.zeros((4, no_obs + 1))
z_init[0, :-1] = y[0, :]
z_init[1, :-1] = y[1, :]
z_init[0, -1] = y[0, -1]  # repeat last entry
z_init[1, -1] = y[1, -1]  # repeat last entry
z_init[2, :-2] = (y[0, 1:] - y[0, 0:-1]) / Ts
z_init[2, -1:] = z_init[2, -3]
z_init[3, :-2] = (y[1, 1:] - y[1, 0:-1]) / Ts
z_init[3, -1:] = z_init[3, -3]

model = pystan.StanModel(file='stan/pendulum_coupled_noprior.stan')
mu0 = np.zeros((4, ))
cP0 = np.array(
    [np.deg2rad(10),
     np.deg2rad(10),
     np.deg2rad(100),
     np.deg2rad(100)])
stan_data = {
    'no_obs': no_obs,
    'Ts': Ts[0, 0],
    'y': y,
    'u': u.flatten(),
    'Lr': Lr,
    'Mp': Mp,
    'Lp': Lp,
    'g': g,
Example #28
ind = np.argmax(mag)
omega_init = omegas[ind]

A_init = np.round(A[ind] / N * 100) / 100
B_init = np.round(B[ind] / N * 100) / 100

plt.plot(omegas, mag)
plt.title('Peak: omega = ' + str(omega_init) + ' A = ' + str(A_init) +
          ' B = ' + str(B_init))
plt.show()

model_path = 'model.pkl'
if Path(model_path).is_file():
    model = pickle.load(open(model_path, 'rb'))
else:
    model = pystan.StanModel(file=model_path[:-4] + '.stan')
    with open(model_path, 'wb') as file:
        pickle.dump(model, file)
# model = pystan.StanModel(file='model.stan')


def init_function():
    output = dict(alpha=omega_init * np.random.uniform(0.8, 1.2))
    return output


stan_data = {'N': N, 'x': x, 'y': y, 'lambda1': lambda1}

fit = model.sampling(
    data=stan_data, init=init_function, iter=6000, warmup=4000,
    chains=4)  #, control=dict(adapt_delta=0.9, max_treedepth=13))
Example #29
    range(1, 1 + len(observedLanguages)))
dat["Total2Hidden"] = [1] + list(range(
    2, 2 + len(hiddenLanguages))) + [0 for _ in observedLanguages]
dat["ParentDistance"] = [0] + [
    distanceToParent[x] for x in hiddenLanguages + observedLanguages
]
dat["CovarianceMatrix"] = covarianceMatrix
dat["prior_only"] = 0
dat["Components"] = 2
dat["FamiliesLists"] = familiesLists
dat["FamiliesNum"] = len(familiesLists)
dat["FamiliesSize"] = len(familiesLists[0])

print(dat)

sm = pystan.StanModel(file=f'{__file__[:-3]}.stan')

#stepping = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
stepping = [0.0, 0.01, 0.02, 0.05, 0.08, 0.1, 0.2, 0.3, 0.4, 0.7, 1.0]
#stepping = [0.0, 0.005, 0.01, 0.015, 0.02, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05, 0.06, 0.075, 0.08, 0.085, 0.09, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5,0.6,  0.7, 0.8, 1.0]


def mean(x):
    return sum(x) / len(x)


import torch

for replicate in range(10):
    perStone = []
    for idx in range(len(stepping) - 1):
Example #30
             'n2':hidden_dim1,
             'm1':z_dim,
             'N': 150,
             'x':standardized_data.T,
             'W1': decoder_dict['linear1.weight'].T.numpy(),
             'b1': decoder_dict['linear1.bias'].T.numpy().reshape(1,hidden_dim2),
             'W2': decoder_dict['linear2.weight'].T.numpy(),
             'b2': decoder_dict['linear2.bias'].T.numpy().reshape(1,hidden_dim1),
             'W3': decoder_dict['out1.weight'].T.numpy(),
             'b3': decoder_dict['out1.bias'].T.numpy().reshape(1,input_dim),
             'cov1': np.identity(input_dim)*9,
             'mu1': np.array([0]*input_dim),
             'cov2': np.identity(input_dim)*0.25,
             'mu2': np.array([0]*input_dim)*0}
 #### stan code
 sm = pystan.StanModel(file='iris_beta.stan')
 fit = sm.sampling(data=stan_data, iter=200, warmup=20, chains=3)
 out = fit.extract(permuted=True)
 print(fit)
 
 mean_pi = out['pi'].mean(axis=0)
 mean_mu = out['theta_mu_star'].mean(axis=0)
 mean_sd = out['theta_sd_star'].mean(axis=0)
 post_pdf_contribs = scipy.stats.norm.pdf(np.atleast_3d(standardized_data),mean_mu,mean_sd[np.newaxis,:,:])
 post_pdf_contribs=post_pdf_contribs.prod(axis=1)*mean_pi[np.newaxis,:]
 cluster_2=np.argmax(post_pdf_contribs,axis=1)+1
 standardized_iris["Cluster_2"]=cluster_2
 
 sns.set(font_scale=2)
 g=sns.pairplot(standardized_iris,vars=['Sepal Length', 'Sepal Width', 'Petal Length','Petal Width'],hue="Cluster_2",height=4)
 g._legend.set_title("Cluster")