Example #1
import numpy as np
import pandas as pd
from cmdstanpy import CmdStanModel


def stan_sampler_advi(model_code):
    # compile the Stan program; model_code is the path to a .stan file
    sm = CmdStanModel(stan_file=model_code)
    # full-rank ADVI fit; `svi_steps` and `iterations` are expected to be
    # defined at module level
    fit = sm.variational(iter=svi_steps,
                         algorithm="fullrank",
                         output_samples=iterations,
                         tol_rel_obj=10)
    # return the "theta" draws of the variational sample in shuffled order
    samples = pd.Series(
        fit.variational_sample[fit.column_names.index("theta")])
    return samples.iloc[np.random.permutation(
        len(samples))].reset_index(drop=True)
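
A hypothetical usage sketch, not part of the original snippet: `svi_steps` and `iterations` are free names inside the function, so they must be defined at module level before it is called; the model path and values below are made up, and the model is assumed to declare a parameter named `theta`.

svi_steps = 20_000   # ADVI iteration budget (hypothetical)
iterations = 1_000   # approximate posterior draws to keep (hypothetical)

theta_draws = stan_sampler_advi("bernoulli_theta.stan")  # hypothetical .stan file
print(theta_draws.head())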
Example #2
		real<lower=0, upper=1> probA;
	}

	model {
		occur ~ binomial(N, probA);
	}
""")

sm = CmdStanModel(stan_file=modelfile)

# maximum likelihood estimation
optim = sm.optimize(data=mdl_data).optimized_params_pd
optim[optim.columns[~optim.columns.str.startswith("lp")]]

# variational inference
vb = sm.variational(data=mdl_data)
vb.variational_sample.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_pd.columns[~vb.variational_params_pd.columns.
                                           str.startswith(("lp", "log_"))]
vb.variational_params_pd[vb_name]
vb.variational_sample[vb_name]

# Markov chain Monte Carlo
fit = sm.sample(data=mdl_data,
                show_progress=True,
                chains=4,
                iter_sampling=50000,
                iter_warmup=10000,
                thin=5)

fit.draws().shape  # iterations, chains, parameters
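
Not part of the original snippet: CmdStanPy's built-in summary and diagnostic helpers can be used to sanity-check the MCMC run above (a minimal sketch).

print(fit.summary())   # per-parameter estimates with MCSE, effective sample size, R-hat
print(fit.diagnose())  # report on divergences, treedepth saturation and E-BFMI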
Example #3
	}

	generated quantities { // generate tau here
		int<lower=1,upper=N> tau = categorical_logit_rng(lp);
	}
""")

sm_modif = CmdStanModel(stan_file=modelfile_modif)
var_name = ["lambda1", "lambda2", "tau"]

# maximum likelihood estimation
optim_modif = sm_modif.optimize(data=mdl_data).optimized_params_pd
optim_modif[optim_modif.columns[~optim_modif.columns.str.startswith("lp")]]

# variational inference
vb_modif = sm_modif.variational(data=mdl_data)
vb_modif.variational_sample.columns = vb_modif.variational_params_dict.keys()
vb_name = vb_modif.variational_params_pd.columns[
    ~vb_modif.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb_modif.variational_params_pd[vb_name]
vb_modif.variational_sample[vb_name]

# Markov chain Monte Carlo
fit_modif = sm_modif.sample(data=mdl_data,
                            show_progress=True,
                            chains=4,
                            iter_sampling=50000,
                            iter_warmup=10000,
                            thin=5)

fit_modif.draws().shape  # iterations, chains, parameters
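
A follow-up sketch, not in the original: `tau` is an integer-valued generated quantity between 1 and N, so its posterior can be summarised as relative frequencies over the sampled values.

tau_draws = fit_modif.draws_pd()["tau"]
print(tau_draws.value_counts(normalize=True).sort_index())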
Example #4
	}
""")

Xrange = range(1, 5)
var_name_repar_array = [f"locs[{i}]"
                        for i in Xrange] + [f"A[{i},{i}]" for i in Xrange]
var_name_repar_combi = ["locs", "A"]

sm_repar = CmdStanModel(stan_file=modelfile_repar)

# maximum likelihood estimation
optim_repar = sm_repar.optimize(data=mdl_data).optimized_params_pd
optim_repar[var_name_repar_array]

# variational inference
vb_repar = sm_repar.variational(data=mdl_data)
vb_repar.variational_sample.columns = vb_repar.variational_params_dict.keys()
vb_name_repar = vb_repar.variational_params_pd.columns[
    ~vb_repar.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb_repar.variational_params_pd[var_name_repar_array]
vb_repar.variational_sample[var_name_repar_array]

# Markov chain Monte Carlo
fit_repar = sm_repar.sample(data=mdl_data,
                            show_progress=True,
                            chains=4,
                            iter_sampling=50000,
                            iter_warmup=10000,
                            thin=5)

fit_repar.draws().shape  # iterations, chains, parameters
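
Optional follow-up, not in the original: the MCMC fit can be converted to ArviZ (assuming the arviz package is available) to summarise and plot the reparameterised variables by their container names.

import arviz as az

idata = az.from_cmdstanpy(posterior=fit_repar)
az.summary(idata, var_names=var_name_repar_combi)     # "locs" and "A"
az.plot_trace(idata, var_names=var_name_repar_combi)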
Example #5
    def _fit_dx(self,
                X_obs,
                ts,
                S,
                R,
                known_rates=[],
                fit_params={},
                model_params={}):

        # Estimate time derivatives using first-order finite differences
        y = []
        for i in range(1, X_obs.shape[0]):
            y.append((X_obs[i, :] - X_obs[i - 1, :]) / (ts[i] - ts[i - 1]))
        y = np.vstack(y)

        X_obs = X_obs[:-1]
        extended_X_obs = np.hstack([X_obs, np.ones((X_obs.shape[0], 1))])
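        # For each observation, build one term per row of R: multiply the
        # entries of extended_X_obs where R == 1 (squared where R == 2),
        # substituting 1 for non-participating entries so they drop out.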
        masked_X_obs = []
        for i in range(X_obs.shape[0]):
            ap = extended_X_obs[i, :] * (R == 1)
            ap += (extended_X_obs[i, :] * (R == 2))**2
            ap_mask = ap + (ap == 0).astype(np.float32)
            masked_X_obs.append(np.prod(ap_mask, axis=1))
        pre_applied_stoichiometries = np.vstack(masked_X_obs)

        data = {
            'N': pre_applied_stoichiometries.shape[0],
            'M': S.T.shape[0],
            'D': S.T.shape[1],
            'D1': len(known_rates),
            'stoichiometric_matrix': S.T,
            'rate_matrix': pre_applied_stoichiometries,
            'y': y,
            'known_rates': known_rates
        }

        default_model_params = {
            'm0': 10,
            'slab_scale': 1,
            'slab_df': 2,
            'sigma': 1,
            'noise_sigma': 1
        }
        default_model_params = {**default_model_params, **model_params}
        data = {**data, **default_model_params}

        if default_model_params['noise_sigma'] <= 0:
            file = "models/horseshoe_normal_est_dx.stan"
        else:
            file = "models/horseshoe_normal_fixed_dx.stan"
        file = os.path.join(os.path.dirname(__file__), file)

        model = CmdStanModel(stan_file=file)

        default_fit_params = {
            'chains': 4,
            'iter_warmup': 1000,
            'iter_sampling': 1000,
            'optimize': False,
            'init': None,
            'show_progress': False,
            'variational': False,
            'algorithm': 'meanfield',
            'v_iters': 1000,
            'v_grad_samples': None,
            'v_elbo_samples': None
        }
        default_fit_params = {**default_fit_params, **fit_params}

        if default_fit_params['optimize']:
            fit = model.optimize(data=data, inits=default_fit_params['init'])
        elif default_fit_params['variational']:
            fit = model.variational(
                data=data,
                iter=default_fit_params['v_iters'],
                grad_samples=default_fit_params['v_grad_samples'],
                elbo_samples=default_fit_params['v_elbo_samples'],
                algorithm=default_fit_params['algorithm'],
                require_converged=False)
            fit.variational_sample.columns = fit.column_names
        else:
            fit = model.sample(
                data=data,
                chains=default_fit_params['chains'],
                iter_warmup=default_fit_params['iter_warmup'],
                iter_sampling=default_fit_params['iter_sampling'],
                inits=default_fit_params['init'],
                refresh=1,
                show_progress=default_fit_params['show_progress'])

        return fit
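
Side note, not from the original source: the finite-difference block at the top of `_fit_dx` is equivalent to a vectorised `numpy.diff` computation, sketched here assuming `ts` is a one-dimensional array.

# y[i] = (X_obs[i + 1] - X_obs[i]) / (ts[i + 1] - ts[i])
y = np.diff(X_obs, axis=0) / np.diff(ts)[:, None]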
Example #6
    def _fit_non_dx(self,
                    X0,
                    X_obs,
                    ts,
                    S,
                    R,
                    observed_species_indices,
                    regularized=True,
                    additive=False,
                    known_rates=[],
                    fit_params={},
                    model_params={}):

        data = {
            'N': ts[1:].shape[0],
            'M': S.shape[1] - 1,
            'M_obs': len(observed_species_indices),
            'obs_idx': [i + 1 for i in observed_species_indices],  # stan idx
            'D': S.shape[0],
            'D1': len(known_rates),
            'y0': X0,
            'y': X_obs,
            'ts': ts,
            'known_rates': known_rates
        }

        default_model_params = {
            'm0': 10,
            'slab_scale': 1,
            'slab_df': 2,
            'tau0': 0.001,
            'noise_sigma': 1
        }
        default_model_params = {**default_model_params, **model_params}
        data = {**data, **default_model_params}

        default_fit_params = {
            'chains': 4,
            'iter_warmup': 1000,
            'iter_sampling': 1000,
            'optimize': False,
            'init': None,
            'show_progress': False,
            'max_treedepth': 10,
            'variational': False,
            'algorithm': 'meanfield',
            'v_iters': 1000,
            'v_grad_samples': None,
            'v_elbo_samples': None,
            'prior_predictive': False
        }
        default_fit_params = {**default_fit_params, **fit_params}

        model_str = self._create_non_derivative_stan_model(
            S, R, regularized, additive,
            default_fit_params['prior_predictive'])
        fp = tempfile.NamedTemporaryFile(mode='w+t', suffix=".stan")
        fp.write(model_str)
        fp.seek(0)

        model = CmdStanModel(stan_file=fp.name)

        if default_fit_params['optimize']:
            fit = model.optimize(data=data, inits=default_fit_params['init'])
        elif default_fit_params['variational']:
            fit = model.variational(
                data=data,
                iter=default_fit_params['v_iters'],
                grad_samples=default_fit_params['v_grad_samples'],
                elbo_samples=default_fit_params['v_elbo_samples'],
                algorithm=default_fit_params['algorithm'],
                require_converged=False)
            fit.variational_sample.columns = fit.column_names
        else:
            if default_fit_params['prior_predictive']:
                fit = model.sample(
                    data=data,
                    iter_sampling=default_fit_params['iter_sampling'],
                    inits=default_fit_params['init'],
                    show_progress=default_fit_params['show_progress'],
                    refresh=1,
                    fixed_param=True)
            else:
                fit = model.sample(
                    data=data,
                    chains=default_fit_params['chains'],
                    iter_warmup=default_fit_params['iter_warmup'],
                    iter_sampling=default_fit_params['iter_sampling'],
                    inits=default_fit_params['init'],
                    refresh=1,
                    show_progress=default_fit_params['show_progress'],
                    max_treedepth=default_fit_params['max_treedepth'])
        fp.close()
        return fit
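
A closing usage note, not in the original: `_fit_non_dx` (like `_fit_dx`) can return a CmdStanMLE, CmdStanVB, or CmdStanMCMC object depending on `fit_params`, so downstream code may need to branch on the type. A minimal sketch; `prefix="rates"` is a hypothetical parameter-name prefix, and the variational branch assumes `variational_sample` is the DataFrame the snippet treats it as.

from cmdstanpy import CmdStanMCMC, CmdStanMLE, CmdStanVB


def fit_to_frame(fit, prefix="rates"):
    # Collect draws (or a one-row point estimate) for every column whose name
    # starts with `prefix`, whichever inference path produced `fit`.
    if isinstance(fit, CmdStanMCMC):
        df = fit.draws_pd()
    elif isinstance(fit, CmdStanVB):
        df = fit.variational_sample      # columns were set to fit.column_names above
    elif isinstance(fit, CmdStanMLE):
        df = fit.optimized_params_pd
    else:
        raise TypeError(f"unexpected fit type: {type(fit)!r}")
    return df[[c for c in df.columns if c.startswith(prefix)]]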