with pm.Model() as model_1:
    mu_A = pm.Normal("mu_A", pooled_mean, sd=variance)
    mu_B = pm.Normal("mu_B", pooled_mean, sd=variance)
    std_A = pm.Uniform("std_A", 1/100, 100)
    std_B = pm.Uniform("std_B", 1/100, 100)
    nu_minus_1 = pm.Exponential("nu-1", 1.0/29)


# In[13]:


with model_1:
    obs_A = pm.StudentT("obs_A", mu=mu_A, lam=1.0/std_A**2, nu=nu_minus_1+1, observed=Control_Matrix.Pvs_per_session)
    obs_B = pm.StudentT("obs_B", mu=mu_B, lam=1.0/std_B**2, nu=nu_minus_1+1, observed=Variant_BT.Pvs_per_session)
    start = pm.find_MAP()
    step = pm.Metropolis(vars=[mu_A, mu_B, std_A, std_B, nu_minus_1])
    trace_1 = pm.sample(25000, step=step, start=start)
    burned_trace_1 = trace_1[10000:]


# You can now visualise the posterior distribution of the group means:

# In[14]:


figsize(12.5, 4)
control_mean = burned_trace_1['mu_A']
variant_mean = burned_trace_1['mu_B']
plt.hist(control_mean, bins=100, alpha=0.7, label=r'Posterior distribution of $\mu_{Control\_Matrix}$', color='grey')
plt.hist(variant_mean, bins=100, alpha=0.7, label=r'Posterior distribution of $\mu_{Variant\_BT}$', color='orange')
plt.title('Posterior distributions for each respective group mean')
plt.legend()
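
# A minimal follow-up sketch: the posterior of the difference between the two
# group means, computed from the samples above (the variable names are
# illustrative only).
diff = burned_trace_1['mu_B'] - burned_trace_1['mu_A']

plt.figure(figsize=(12.5, 4))
plt.hist(diff, bins=100, color='steelblue')
plt.axvline(0, color='black', linestyle='--')
plt.title(r'Posterior distribution of $\mu_B - \mu_A$')

# posterior probability that the variant mean exceeds the control mean
print('P(mu_B > mu_A | data) = {:.3f}'.format((diff > 0).mean()))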
Example 2
def bayesian_logistic(
    df_,
    feature_names,
    target_name,
    results,
    participant,
    experiment,
    dot_dir,
    window=0,
):
    """
    Since the classification is redundent after the features and targets are 
    ready, I would rather to make a function for the redundent part
    
    Inputs:
        df_                 : dataframe of a given subject
        feature_names       : names of features
        target_name         : name of target
        results             : dictionary like object, update itself every cross validation
        participant         : string, for updating the results dictionary
        experiment          : for graph of the tree model
        dot_dit             : directory of the tree plots
        window              : integer value, for updating the results dictionary
    return:
        results
    """
    features, targets = [], []
    for block, df_block in df_.groupby('blocks'):
        # preparing the features and target by shifting the feature columns up
        # and shifting the target column down
        feature = (
            df_block[feature_names].shift(
                window)  # shift downward, so the first `window` rows become NaN
            .dropna()  # drop the rows that became NaN after the shift
            .values  # I only need the matrix, not the data frame
        )
        target = (
            df_block[target_name].shift(
                -window
            )  # same for the target, but shifting upward, so the last `window` rows become NaN
            .dropna().values)
        features.append(feature)
        targets.append(target)
    features = np.concatenate(features)
    targets = np.concatenate(targets)
    features, targets = shuffle(features, targets)
    # for each classifier, we fit and cross-validate it, then save the
    # classification score and the attribute weights
    model_name = 'bayes_logistic'

    # this is for initialization
    df_train = pd.DataFrame(features, columns=feature_names)
    df_train[target_name] = targets
    scaler = StandardScaler()

    for name in feature_names:
        if 'RT' in name:
            df_train[name] = scaler.fit_transform(
                df_train[name].values.reshape(-1, 1))
    niter = 1000
    formula = '{} ~'.format(target_name)
    for name in feature_names:
        formula += ' + {}'.format(name)

    with pm.Model() as model:
        pm.glm.GLM.from_formula(
            formula,
            df_train,
            family=pm.glm.families.Binomial(),
        )
        start = pm.find_MAP(progressbar=False)
        try:
            step = pm.NUTS(scaling=start)
        except Exception:
            step = pm.Metropolis()
        trace = pm.sample(
            niter,
            start=start,
            step=step,
            njobs=4,
            random_seed=12345,
            progressbar=0,
        )
    df_trace = pm.trace_to_dataframe(trace[niter // 2:])
    intercept = df_trace['Intercept'].mean()
    df_test = pd.DataFrame(features, columns=feature_names)
    weights = df_trace.iloc[:, 1:].values.mean(0)
    preds = predict(df_test.values, weights, intercept)
    # score the predictions
    score = roc_auc_score(targets, preds)
    results['sub'].append(participant)
    results['model'].append(model_name)
    results['score'].append(score)
    results['window'].append(window)
    for iii, name in enumerate(feature_names):
        results[name].append(df_trace[name].values.mean())

    print('sub {},model {},window {},score={:.3f}'.format(
        participant, model_name, window, score))
    return results, trace
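
# A minimal usage sketch for the function above, on a toy dataframe with a
# 'blocks' column plus feature and target columns. The column names, the toy
# data, and the `predict` helper below (assumed by the function but not shown
# here) are illustrative assumptions, not part of the original code.
from collections import defaultdict
import numpy as np
import pandas as pd


def predict(X, weights, intercept):
    # hypothetical logistic predictor matching how the weights are used above
    return 1.0 / (1.0 + np.exp(-(X @ weights + intercept)))


rng = np.random.RandomState(0)
df_toy = pd.DataFrame({
    'blocks': np.repeat([1, 2], 50),
    'RT_1': rng.normal(size=100),
    'RT_2': rng.normal(size=100),
    'correct': rng.randint(0, 2, size=100),
})

results = defaultdict(list)  # keys are created on first append
results, trace = bayesian_logistic(
    df_toy, ['RT_1', 'RT_2'], 'correct', results,
    participant='sub-01', experiment='toy', dot_dir='.', window=1,
)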
Example 3
                # stronger beliefs could lead to different priors
                p_test = pm.Uniform("p_test", 0, 1)
                p_control = pm.Uniform("p_control", 0, 1)

                # deterministic delta variable, our unknown of interest
                # deterministic is not based on a distribution
                delta = pm.Deterministic("delta", p_test - p_control)

                # Set of observations
                # Bernoulli stochastic variables generated via our observed values
                obs_A = pm.Bernoulli("obs_A", p_test, observed=test_obs)
                obs_B = pm.Bernoulli("obs_B", p_control, observed=control_obs)

                # monte carlo simulation, last step of model, this part takes the longest
                # metropolis-hastings algo, gets sequence of random variables from prob. dist.
                step = pm.Metropolis()
                trace = pm.sample(20000, step=step, njobs=2)
                burned_trace = trace[10000:]

            # generated likelihood, prior, and posterior distributions as arrays of values
            p_test_samples = burned_trace["p_test"]
            p_control_samples = burned_trace["p_control"]
            delta_samples = burned_trace["delta"]
            delta_min = delta_samples.min()
            delta_max = delta_samples.max()
            target_cann = -0.02
            abs_target = (control_conv) * target_cann

            # results and calculations data for DOMO
            abs_target = (control_conv) * target_cann
            prob_cann = 100 - (round(
Example 4
    def test_run(self):
        with self.build_model():
            pm.sample(50, step=[pm.NUTS(), pm.Metropolis()])
Example 5
                                    S2=S2,
                                    states=states1,
                                    observed = dataset[4])
    
    states2 = HMMStatesN('states2',P=P,PA=PA, shape=len(dataset[205]))
    
    emission2 = HMMGaussianEmissions('emission2',
                                    A1=A1,
                                    A2=A2,
                                    S1=S1,
                                    S2=S2,
                                    states=states2,
                                    observed = dataset[205])

    start = pm.find_MAP(fmin=optimize.fmin_powell)
    step1 = pm.Metropolis(vars=[P, PA, A1, A2, S1, S2, emission1,emission2])
    step2 = pm.BinaryGibbsMetropolis(vars=[states1,states2])
    trace = pm.sample(10000, start=start, step=[step1, step2])
	
pm.traceplot(trace)
pm.summary(trace[500:])

sample1_avg=np.average(trace['states1'][500:],axis=0)
sample2_avg=np.average(trace['states2'][500:],axis=0)

plt.figure()
plt.plot(dataset[4])
plt.plot((sample1_avg)*0.6)

plt.figure()
plt.plot(dataset[205])
Example 6
def outlier_detection(data, uncertainties):
    #Outlier detection
    #This follows the analysis from Hogg, Bovy and Lang (2010): https://arxiv.org/pdf/1008.4686.pdf
    #And is edited slightly from the pymc3 tutorial
    #here: https://docs.pymc.io/notebooks/GLM-robust-with-outlier-detection.html

    with pm.Model() as mdl_signoise:

        sig_hyperprior = pm.Uniform('sig', 0.0, 50.0)
        vel_hyperprior = pm.Normal('vel', 0.0, 50.0)

        vel_tracers = pm.Normal('vel-tracers',
                                mu=vel_hyperprior,
                                sd=uncertainties,
                                shape=len(data))

        ## Define weakly informative priors for the mean and variance of outliers
        vel_out = pm.Normal('vel-out', mu=0, sd=100, testval=pm.floatX(1.))
        sig_out = pm.HalfNormal('sig-out', sd=100, testval=pm.floatX(1.))

        ## Define Bernoulli inlier / outlier flags according to a hyperprior
        ## fraction of outliers, itself constrained to [0,.5] for symmetry
        frac_outliers = pm.Uniform('frac-outliers', lower=0., upper=.5)
        is_outlier = pm.Bernoulli('is-outlier',
                                  p=frac_outliers,
                                  shape=len(data))

        ## Extract observed y and sigma_y from dataset, encode as theano objects
        data_thno = thno.shared(np.asarray(data, dtype=thno.config.floatX),
                                name='data')
        uncert_thno = thno.shared(np.asarray(uncertainties,
                                             dtype=thno.config.floatX),
                                  name='uncertainties')

        ## Use custom likelihood using DensityDist
        likelihood = pm.DensityDist('likelihood',
                                    logp_signoise,
                                    observed={
                                        'data': data_thno,
                                        'isoutlier': is_outlier,
                                        'velin': vel_tracers,
                                        'sigin': sig_hyperprior,
                                        'velout': vel_out,
                                        'sigout': sig_out
                                    })

    with mdl_signoise:
        ## two-step sampling to create Bernoulli inlier/outlier flags
        step1 = pm.Metropolis([frac_outliers, vel_out, sig_out])
        step2 = pm.step_methods.BinaryGibbsMetropolis([is_outlier])

        ## take samples
        # I got an error when njobs>1 here. Not sure why!
        traces_signoise = pm.sample(20000,
                                    step=[step1, step2],
                                    tune=10000,
                                    progressbar=True,
                                    njobs=1)

    #Plot things
    pm.traceplot(traces_signoise)
    plt.savefig('Plots/outlier_traceplot.pdf')

    index = np.arange(1, len(data) + 1)
    fig, ax = plt.subplots(figsize=(10, 6))
    mean_outlier_prob = np.mean(traces_signoise['is-outlier'][-1000:, :],
                                axis=0)
    ax.scatter(mean_outlier_prob, index)
    ax.set_yticks(index)
    ax.set_yticklabels(["{} km/s".format(int(d)) for d in data])

    for i in index:
        ax.axhline(i, c='k', alpha=0.1, linestyle='dashed')

    ax.set_title('Outlier Probabilities')
    ax.set_xlabel(r'$p_{\mathrm{outlier}}$')
    ax.set_ylabel('Data point')
    fig.tight_layout()
    fig.savefig('Plots/outlier_probabilities.pdf')

    #KDE approximation to the new posterior
    xx = np.linspace(0.0, 30.0, 1000)
    kde_approximation2 = stats.gaussian_kde(traces_signoise['sig'])

    fig, ax = plt.subplots(figsize=(10, 6))
    ax.plot(xx, kde_approximation2(xx), c='r', linewidth=3.0)
    ax.hist(traces_signoise['sig'],
            100,
            facecolor='0.8',
            edgecolor='k',
            histtype='stepfilled',
            normed=True,
            linewidth=2.0)
    ax.axvline(xx[np.argmax(kde_approximation2(xx))],
               c='k',
               linestyle='dashed',
               linewidth=2.0)

    ax.set_title(r'$\sigma$- with outlier detection')
    ax.set_xlim([0.0, 30.0])
    ax.set_ylabel(r'PDF')
    ax.set_yticks([])
    #ax.tick_params(axis='both', which='major', labelsize=15)
    ax.set_xlabel(r'$\sigma$ (kms$^{-1}$)')

    fig.tight_layout()
    fig.savefig('Plots/pdf_outliers.jpg')

    return kde_approximation2
Example 7
    mix = np.random.normal(np.repeat(means, n_cluster),
                           np.repeat(std_devs, n_cluster))

    with pm.Model() as model_ug:
        # Each observation is assigned to a cluster/component with probability p
        p = pm.Dirichlet('p', a=np.ones(clusters))
        category = pm.Categorical('category', p=p, shape=n_total)

        # We estimate the unknown gaussians means and standard deviation
        means = pm.Normal('means', mu=[10, 20, 35], sd=2, shape=clusters)
        sd = pm.HalfCauchy('sd', 5)

        y = pm.Normal('y', mu=means[category], sd=sd, observed=mix)

        step1 = pm.ElemwiseCategorical(vars=[category], values=range(clusters))
        step2 = pm.Metropolis(vars=[means, sd, p])
        trace_ug = pm.sample(10000, step=[step1, step2], nchains=1)

        chain_ug = trace_ug[1000:]
        pm.traceplot(chain_ug)

    plt.figure()
    ppc = pm.sample_ppc(chain_ug, 50, model_ug)
    for i in ppc['y']:
        sns.kdeplot(i, alpha=0.1, color='b')

    sns.kdeplot(
        np.array(mix), lw=2,
        color='k')  # you may want to replace this with the posterior mean
    plt.xlabel('$x$', fontsize=14)
Example 8
        with add_prefix(f'Process #{rank}'):
            dat, err = ss
            ell, pol = bs
            print('Initializing model.')
            model = sed2.fg_models.init(model_name, dat, err, ac, map_coll,
                                        do_sim)

            with model:  # pymc3 wants the model object in the context stack when running things
                # Also, don't try to split into multiple processes, because it probably won't help given MPI is around
                print('Sampling with model.')
                if step == 'metropolis':
                    trace = pymc.sample(10000,
                                        tune=1000,
                                        chains=2,
                                        step=pymc.Metropolis(),
                                        progressbar=False,
                                        return_inferencedata=False)
                elif step == 'None':
                    trace = pymc.sample(10000,
                                        tune=1000,
                                        chains=2,
                                        progressbar=False,
                                        return_inferencedata=False)
                else:
                    raise NameError('Unknown step method.')

                file_write(ell, pol, trace, post_dir)
                file_write(ell, pol + '_model', model, post_dir)

    else:  # These models fit over ell, so only split by pol
Example 9
    predictorNames = x.columns
    n_predictors = len(predictorNames)

# THE MODEL
with pm.Model() as model:
    # define the priors
    beta0 = pm.Normal('beta0', mu=0, tau=1.0E-12)
    beta1 = pm.Normal('beta1', mu=0, tau=1.0E-12, shape=n_predictors)
    tau = pm.Gamma('tau', 0.01, 0.01)
    mu = beta0 + pm.math.dot(beta1, x.values.T)
    # define the likelihood
    yl = pm.Normal('yl', mu=mu, tau=tau, observed=y)
    # Generate a MCMC chain
    start = pm.find_MAP()
    step1 = pm.NUTS([beta1])
    step2 = pm.Metropolis([beta0, tau])
    trace = pm.sample(10000, [step1, step2], start, progressbar=False)

# EXAMINE THE RESULTS
burnin = 5000
thin = 1

# Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

# Check for mixing and autocorrelation
#pm.autocorrplot(trace[burnin::thin], vars =[mu, tau])
#pm.autocorrplot(trace, vars =[beta0])

## Plot KDE and sampled values for each parameter.
Example 10
    def fit(
        self,
        draws: int = 500,
        chains: int = 4,
        trace_size: int = 500,
        method: Sampler = Sampler.NUTS,
        map_initialization: bool = False,
        finalize: bool = True,
        step_kwargs: Dict = None,
        sample_kwargs: Dict = None,
    ):
        """Fit the PMProphet model.

        Parameters
        ----------
        draws : int, > 0
            The number of MCMC samples.
        chains: int, =4
            The number of MCMC chains.
        trace_size: int, =500
            The last N samples to keep in the trace.
        method : Sampler
            The sampler of your choice
        map_initialization : bool
            Initialize the model with maximum a posteriori estimates.
        finalize : bool
            Finalize the model.
        step_kwargs : dict
            Additional arguments for the sampling algorithms
            (`NUTS` or `Metropolis`).
        sample_kwargs : dict
            Additional arguments for the PyMC3 `sample` function
            (see the usage sketch after this method).
        """

        if sample_kwargs is None:
            sample_kwargs = {}
        if step_kwargs is None:
            step_kwargs = {}
        if chains * draws < trace_size and method != Sampler.ADVI:
            raise Exception(
                "Desired trace size should be smaller than the sampled data points"
            )

        self.skip_first = (chains *
                           draws) - trace_size if method != Sampler.ADVI else 0
        self.chains = chains

        if finalize:
            self.finalize_model()

        with self.model:
            if map_initialization:
                self.start = pm.find_MAP(maxeval=10000)
                if draws == 0:
                    self.trace = {
                        k: np.array([v])
                        for k, v in self.start.items()
                    }

            if draws:
                if method in (Sampler.NUTS, Sampler.METROPOLIS):
                    step_method = pm.NUTS(
                        **step_kwargs
                    ) if method == Sampler.NUTS else pm.Metropolis(
                        **step_kwargs)
                    self.trace = pm.sample(
                        draws,
                        chains=chains,
                        step=step_method,
                        start=self.start if map_initialization else None,
                        **sample_kwargs)
                elif method == Sampler.ADVI:
                    res = pm.fit(
                        draws,
                        start=self.start if map_initialization else None)
                    self.trace = res.sample(trace_size)
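
# A hypothetical call of the method above, assuming `m` is an already-built
# PMProphet model and `Sampler` is the enum referenced in the signature; the
# numbers are illustrative only.
m.fit(
    draws=2000,
    chains=2,
    trace_size=1000,            # must not exceed chains * draws
    method=Sampler.METROPOLIS,  # Metropolis generally needs more draws than NUTS
    map_initialization=True,    # start sampling from the MAP estimate
)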
Example 11
"""
Reimplementation of Rate_1 model from Chapter 3, Bayesian Cognitive Modeling.

Created Aug/2015 by Johannes Keyser <*****@*****.**>
"""

import pymc3 as pm

n = 100  # n = 10, or n = 100, or n = 1
k = 50  # k =  5, or k =  99, or k = 0
# For n=100, k=50, theta's posterior gets more narrow, b/c more data.
# For n=100, k=99, theta's posterior narrows down further, and gets almost
#                  squeezed against its maximum, where it drops abruptly.
# For n=1, k=0, theta's posterior is still very broad, but clearly leans toward
#               lower values. Even one data point can have a lot of influence.

model = pm.Model()

with model:
    # Prior Distribution for Rate Theta
    theta = pm.Beta('theta', alpha=1, beta=1)
    # Observed Counts
    k = pm.Binomial('k', p=theta, n=n, observed=k)
    # instantiate Metropolis-Hastings sampler
    stepFunc = pm.Metropolis()
    # draw 20,000 posterior samples (in 2 parallel chains)
    Nsample = 10000
    traces = pm.sample(Nsample, step=stepFunc, njobs=2)

axs = pm.traceplot(traces, vars=['theta'], combined=False)
axs[0][0].set_xlim([0, 1])  # manually set x-limits for comparisons
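
# A short, self-contained sketch of the comparison described in the comments
# above: the same model refit for the three (n, k) settings. The plotting
# choices are illustrative only.
import matplotlib.pyplot as plt

settings = [(100, 50), (100, 99), (1, 0)]
fig, axes = plt.subplots(1, 3, figsize=(12, 3), sharex=True)
for ax, (n_i, k_i) in zip(axes, settings):
    with pm.Model():
        theta_i = pm.Beta('theta', alpha=1, beta=1)
        pm.Binomial('k', p=theta_i, n=n_i, observed=k_i)
        tr = pm.sample(5000, step=pm.Metropolis(), njobs=1)
    ax.hist(tr['theta'], bins=50)
    ax.set_title('n={}, k={}'.format(n_i, k_i))
    ax.set_xlim(0, 1)
fig.tight_layout()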
Example 12
def MCMC_THEANO(data, path_model, train=True, samples=2000):
    model_fit = Path(path_model)

    # Check whether a file with the trained model already exists
    if model_fit.is_file():
        file = open(path_model, "rb")
        samples = pickle.load(file)
    else:
        C = data['C']
        N = data['N']
        A = data['A']
        # Define the model constructor
        model = pm.Model()

        with model:
            # Parameters that depend on the observed variables
            K = pm.Normal("K", mu=0, sigma=1, shape=(1, N))
            gpa0 = pm.Normal("gpa0", mu=0, sigma=1)
            lsat0 = pm.Normal("lsat0", mu=0, sigma=1)
            w_k_gpa = pm.Normal("w_k_gpa", mu=0, sigma=1)
            w_k_lsat = pm.Normal("w_k_lsat", mu=0, sigma=1)
            w_k_fya = pm.Normal("w_k_fya", mu=0, sigma=1)
            w_a_gpa = pm.Normal("w_a_gpa",
                                mu=np.zeros(C),
                                sigma=np.ones(C),
                                shape=C)
            w_a_lsat = pm.Normal("w_a_lsat",
                                 mu=np.zeros(C),
                                 sigma=np.ones(C),
                                 shape=C)
            w_a_fya = pm.Normal("w_a_fya",
                                mu=np.zeros(C),
                                sigma=np.ones(C),
                                shape=C)

            sigma_gpa_2 = pm.InverseGamma("sigma_gpa_2", alpha=1, beta=1)

            mu = gpa0 + (w_k_gpa * K) + pm.math.dot(A, w_a_gpa)

            # Define the observed variables
            gpa = pm.Normal("gpa",
                            mu=mu,
                            sigma=pm.math.sqrt(sigma_gpa_2),
                            observed=list(data["GPA"]),
                            shape=(1, N))
            lsat = pm.Poisson("lsat",
                              pm.math.exp(lsat0 + w_k_lsat * K +
                                          pm.math.dot(A, w_a_lsat)),
                              observed=list(data["LSAT"]),
                              shape=(1, N))
            if train:
                fya = pm.Normal("fya",
                                mu=w_k_fya * K + pm.math.dot(A, w_a_fya),
                                sigma=1,
                                observed=list(data["FYA"]),
                                shape=(1, N))
            step = pm.Metropolis()
            samples = pm.sample(samples, step)
            # Save the trained model
            file = open(path_model, "wb")
            pickle.dump(samples, file, protocol=-1)

    return samples
Example 13
def bayesian_linear_model(data,
                          N_steps=20000,
                          step="Metropolis",
                          burnin=None,
                          njobs=1,
                          progressbar=True,
                          chain_start=0,
                          output_format="rho",
                          sample_params={}):
    """Docstring for bayesian linear model.
    :data: The data used, expects a 2-d array with one dimension having 2 columns/rows
    :N_steps: The number of steps in each chain. If using NUTS sampling, this can be smaller.
    :step: The sampling method. Either "Metropolis" (faster sampling, but needs more steps) or NUTS (slower, but fewer steps)
    :burnin: number of steps to discard at the beginning of each chain. If None, half of N_steps is discarded.
    :njobs: The number of parallel jobs.
    :chain_start: The number assigned to the chain. Can be useful when aiming to combine different chains
    :progressbar: Should a progressbar be displayed during sampling?
    :output_format: What should be returned from the sampling? If "rho", only a numpy array with the correlation
    values is returned. If "full", the whole multitrace is returned, which is useful for convergence analysis.
    :sample_params: Additional parameters for pymc3's sample function.
    :returns: Either a multitrace or a numpy array, depending on output_format.
    See the usage sketch after this function.
    """

    # test the data for the right format and transform it if necessary/possible
    if isinstance(data, list):
        try:
            data = np.vstack(data)
        except ValueError:
            print("Error: Data dimensions do not match!")
            return None

    if isinstance(data, np.ndarray):
        if len(data.shape) != 2:
            if len(data) == 2 and len(data[0]) != len(data[1]):
                print("Error: Data dimensions do not match!")
                return None
            else:
                print(
                    "Error: Data not a two-dimensional array, don't know what to do!"
                )
                return None
        else:
            if data.shape[1] != 2:
                if data.shape[0] != 2:
                    print(
                        "Error: No dimension with 2 variables present, don't know what to do!"
                    )
                else:
                    data = data.T

    # if no burnin is specified, use half of the step number
    if burnin is None:
        burnin = N_steps // 2  # integer division, so `tune` receives an int

    sample_params.update({
        "draws": N_steps,
        "njobs": njobs,
        "tune": burnin,
        "progressbar": progressbar
    })

    # initialize model
    basic_model = pm.Model()

    with basic_model:

        # define model priors
        m_mu = pm.Normal('mu', mu=0., sd=10, shape=2)
        nu = pm.Uniform('nu', 0, 5)
        packed_L = pm.LKJCholeskyCov('packed_L',
                                     n=2,
                                     eta=nu,
                                     sd_dist=pm.HalfCauchy.dist(1.))
        chol = pm.expand_packed_triangular(2, packed_L)
        sigma = pm.Deterministic('sigma', _Cov2Cor(chol.dot(chol.T)))

        # model likelihood
        mult_n = pm.MvNormal('mult_n', mu=m_mu, chol=chol, observed=data)

        # which sampler to use
        if step == "Metropolis":
            step = pm.Metropolis()
        elif step == "NUTS":
            step = pm.NUTS()
        sample_params.update({"step": step})

        # MCMC sample
        trace = pm.sample(**sample_params)

    # Return the full pymc3 trace or only an array of the correlation?
    if output_format == "full":
        output = trace
    else:
        output = trace["sigma"][:, 0, 1]
    return output
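
# A minimal usage sketch for the function above, on synthetic bivariate-normal
# data. It assumes the module-level helper _Cov2Cor used inside the function is
# available; the true correlation of 0.7 and the sampler settings are
# illustrative only.
import numpy as np

rng = np.random.RandomState(0)
cov = np.array([[1.0, 0.7],
                [0.7, 1.0]])
synthetic = rng.multivariate_normal(mean=[0.0, 0.0], cov=cov, size=500)

rho_samples = bayesian_linear_model(synthetic,
                                    N_steps=5000,
                                    step="Metropolis",
                                    output_format="rho",
                                    progressbar=False)
print('posterior mean correlation: {:.2f}'.format(rho_samples.mean()))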
Example 14
    def get_changepoints_location(self, n_changepoints: int) -> 'changepoint locations, list':
        """Purpose: get most likely changepoint locations given
        data and number of changepoints.
        Heavily inspired by:
        http://www.claudiobellei.com/2017/01/25/changepoint-bayesian/
        """

        if len(self.data) < 10:
            raise ValueError("Received less than 10 data points!")

        if n_changepoints < 1:
            raise ValueError("Number of changepoints must be positive integer!")

        # dictionary of means
        self.mu_d = {}

        # stochastic means
        self.mus_d = {}

        # dictionary of changepoints
        self.tau_d = {}

        # define time range
        t = np.arange(len(self.data))

        with pm.Model() as model:
            # variance
            #sigma = pm.Uniform("sigma",1.e-3,20)
            sigma = pm.HalfNormal('sigma', sd=1)
            # define means
            for i in range(n_changepoints+1):
                self.mu_d['mu_{0}'.format(i)] = pm.Uniform('mu_{0}'.format(i), lower=self.data.min(),
                                                           upper=self.data.max(),)# testval=int(len(self.data)/(n_changepoints+1)*(i+1)))

            # define changepoint locations and stochastic variable(s) _mu
            for i in (range(n_changepoints)):
                if i == 0:
                    self.tau_d['tau_{0}'.format(i)] = pm.DiscreteUniform('tau_{0}'.format(i),
                                                                    t.min(), t.max())
                    self.mus_d['mus_d_{0}'.format(i)] = T.switch(self.tau_d['tau_{0}'.format(i)] >= t,
                                                                 self.mu_d['mu_{0}'.format(i)],
                                                                 self.mu_d['mu_{0}'.format(i+1)])
                else:
                    self.tau_d['tau_{0}'.format(i)] = pm.DiscreteUniform('tau_{0}'.format(i),
                                                                         self.tau_d['tau_{0}'.format(i-1)], t.max())
                    self.mus_d['mus_d_{0}'.format(i)] = T.switch(self.tau_d['tau_{0}'.format(i)] >= t,
                                                                 self.mus_d['mus_d_{0}'.format(i-1)],
                                                                 self.mu_d['mu_{0}'.format(i+1)])

            def logp_func(data):
                """Function to be provided to PyMC3
                """
                return logp.sum()

            # define log-likelihood for the parameters given data
            # (Gaussian log-density: -log(sigma*sqrt(2*pi)) - (x - mu)^2 / (2*sigma^2))
            logp = - T.log(sigma * T.sqrt(2.0 * np.pi)) \
               - T.sqr(self.data - self.mus_d['mus_d_{0}'.format(i)]) / (2.0 * sigma * sigma)

            # define density dist
            L_obs = pm.DensityDist('L_obs', logp_func, observed=self.data)

            # start MCMC algorithm
            start = pm.find_MAP()
            step = pm.Metropolis()
            # iterate MCMC
            trace = pm.sample(self.n_iter, step, start=start, random_seed=123, progressbar=False)

            # calculate changepoints
            locations = []
            for k in range(n_changepoints):
                changepoint = Counter(trace.get_values('tau_{0}'.format(k))).most_common(1)[0][0]
                locations.append(changepoint)

        return sorted(set(locations)), trace
Example 15
with model:
    mc.Normal('X', mu, tau=1 / sigma**2)

# In[15]:

model.vars

# In[16]:

start = dict(X=2)

# In[17]:

with model:
    step = mc.Metropolis()
    trace = mc.sample(10000, step=step, start=start)

# In[18]:

x = np.linspace(-4, 12, 1000)

# In[19]:

y = stats.norm(mu, sigma).pdf(x)

# In[20]:

X = trace.get_values("X")

# In[21]:
Example 16
import numpy as np
import pymc3 as pm
import scipy.stats as st
import matplotlib.pyplot as plt
from scipy.integrate import quad

obs = [-4.]
niter = 5000
nburn = 2000
nchains = 3
with pm.Model() as model:
    m = pm.Uniform('m', lower=0., upper=10.)
    likelihood = pm.Normal('y_obs', mu=m, sigma=2., observed=obs)
    step_0 = pm.Metropolis(S=np.array([.01]))
    step_1 = pm.Metropolis(S=np.array([10]))
    step_2 = pm.Metropolis(S=np.array([2.5]))
    trace_0 = pm.sample(niter,
                        step=step_0,
                        cores=1,
                        tune=0,
                        chains=nchains,
                        random_seed=123)
    trace_1 = pm.sample(niter,
                        step=step_1,
                        cores=1,
                        tune=0,
                        chains=nchains,
                        random_seed=123)
    trace_2 = pm.sample(niter,
                        step=step_2,
                        cores=1,
Example 17
    # Prior for distribution of switchpoint location
    switchpoint = pm.DiscreteUniform('switchpoint', lower=0, upper=years)
    # Priors for pre- and post-switch mean number of disasters
    early_mean = pm.Exponential('early_mean', lam=1.)
    late_mean = pm.Exponential('late_mean', lam=1.)

    # Allocate appropriate Poisson rates to years before and after current
    # switchpoint location
    idx = arange(years)
    # theano style:
    # rate = switch(switchpoint >= idx, early_mean, late_mean)
    # non-theano style
    rate = rateFunc(switchpoint, early_mean, late_mean)

    # Data likelihood
    disasters = pm.Poisson('disasters', rate, observed=disasters_data)

    # Initial values for stochastic nodes
    start = {'early_mean': 2., 'late_mean': 3.}

    # Use slice sampler for means
    step1 = pm.Slice([early_mean, late_mean])
    # Use Metropolis for switchpoint, since it accommodates discrete variables
    step2 = pm.Metropolis([switchpoint])

    # njobs>1 works only with most recent (mid August 2014) Theano version:
    # https://github.com/Theano/Theano/pull/2021
    tr = pm.sample(1000, tune=500, start=start, step=[step1, step2], njobs=1)
    pm.traceplot(tr)
Example 18
    phic = pm.Uniform('phic', lower=0, upper=1, testval=.3)
    zck = pm.Bernoulli('zck', p=phic, shape=companyABC.shape)
    #     zij_ = pm.theanof.tt_rng().uniform(size=xij.shape)
    #     zij = pm.Deterministic('zij', tt.lt(zij_, phii[sbjid]))
    line = tt.constant(np.ones((len(companyABC))) * .5)
    beta_mu = pm.Deterministic(
        'beta_mu', tt.squeeze(tt.switch(tt.eq(zck, 0), liner, line)))

    Observed = pm.Weibull("Observed",
                          alpha=alpha,
                          beta=beta_mu,
                          observed=elec_faults)  # observed values

    #     start = pm.find_MAP()
    step = pm.Metropolis([zck])
    #     step1 = pm.Slice([am0, am1])
    trace = pm.sample(4000, step=[step], init='advi+adapt_diag', tune=1000)

# with model1:
#     s = shared(pm.floatX(1))
#     inference = pm.ADVI(cost_part_grad_scale=s)
#     # ADVI has nearly converged
#     inference.fit(n=20000)
#     # It is time to set `s` to zero
#     s.set_value(0)
#     approx = inference.fit(n=10000)
#     trace = approx.sample(3000, include_transformed=True)
#     elbos1 = -inference.hist

chain = trace[2000:]
Example 19
    mu1 = pm.Beta('mu1', 1, 1, shape=n_cond)
    a_Beta1 = mu1[cond_of_subj] * kappa[cond_of_subj]
    b_Beta1 = (1 - mu1[cond_of_subj]) * kappa[cond_of_subj]

    #Prior on theta
    theta0 = pm.Beta('theta0', a_Beta0, b_Beta0, shape=n_subj)
    theta1 = pm.Beta('theta1', a_Beta1, b_Beta1, shape=n_subj)
    # if model_index == 0 then sample from theta1 else sample from theta0
    theta = pm.switch(pm.eq(model_index, 0), theta1, theta0)

    # Likelihood:
    y = pm.Binomial('y', p=theta, n=n_trl_of_subj, observed=n_corr_of_subj)

    # Sampling
    start = pm.find_MAP()
    step1 = pm.Metropolis(model.vars[1:])
    step2 = pm.ElemwiseCategoricalStep(var=model_index, values=[0, 1])
    trace = pm.sample(20000, [step1, step2], start=start, progressbar=False)

# EXAMINE THE RESULTS.
burnin = 10000
thin = 10

## Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

## Check for mixing and autocorrelation
#pm.autocorrplot(trace, vars =[mu, kappa])

## Plot KDE and sampled values for each parameter.
Example 20
import pymc3 as pm

with pm.Model() as model:
    x = pm.Normal('x', 1, 1)
    x2 = pm.Potential('x2', -x**2)

    start = model.test_point
    h = pm.find_hessian(start)
    step = pm.Metropolis(model.vars, h)


def run(n=3000):
    if n == "short":
        n = 50
    with model:
        pm.sample(n, step=step, start=start)


if __name__ == '__main__':
    run()
Example 21
    def make_step(cls):
        args = {}
        if hasattr(cls, 'step_args'):
            args.update(cls.step_args)
        return pm.Metropolis(**args)
Example 22
def run(n=1000):
    if n == "short":
        n = 50
    with model:
        trace = pm.sample(10000, step=[pm.NUTS(), pm.Metropolis()])
Example 23
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
from scipy.stats import uniform, norm
plt.rcParams["figure.figsize"] = (10, 5)
np.random.seed(42)

# Prepare the data
x = uniform(0, 20).rvs(30)
eps = norm(0, 4).rvs(30)
y = 11 + 3*x + eps

# Sampling w/ Metropolis
with pm.Model() as model:
    b_0 = pm.Normal("b_0", mu=0, sd=10)
    b_1 = pm.Normal("b_1", mu=0, sd=2)
    e = pm.HalfCauchy("e", 2)
    mu = pm.Deterministic("mu", b_0 + b_1*x)
    Y = pm.Normal("Y", mu=mu, sd=e, observed=y)
    trace = pm.sample(10000, step=pm.Metropolis())
pm.autocorrplot(trace, varnames=["b_0", "b_1", "e"]);
plt.savefig("./results/4-12-autocorrelation-metropolis.png")

# Sampling w/ NUTS
with pm.Model() as model:
    b_0 = pm.Normal("b_0", mu=0, sd=10)
    b_1 = pm.Normal("b_1", mu=0, sd=2)
    e = pm.HalfCauchy("e", 2)
    mu = pm.Deterministic("mu", b_0 + b_1*x)
    Y = pm.Normal("Y", mu=mu, sd=e, observed=y)
    trace = pm.sample(10000)
pm.autocorrplot(trace, varnames=["b_0", "b_1", "e"]);
plt.savefig("./results/4-12-autocorrelation-nuts.png")
Example 24
    a1 = pm.Normal('a1', mu=0, tau=a1tau, shape=Nx1Lvl)
    a2 = pm.Normal('a2', mu=0, tau=a2tau, shape=Nx2Lvl)
    a1a2 = pm.Normal('a1a2', mu=0, tau=a1a2tau, shape=[Nx1Lvl, Nx2Lvl])

    b1 = pm.Deterministic('b1', a1 - T.mean(a1))
    b2 = pm.Deterministic('b2', a2 - T.mean(a2))
    b1b2 = pm.Deterministic('b1b2', a1a2 - T.mean(a1a2))

    mu = a0 + b1[x1] + b2[x2] + b1b2[x1, x2]

    # define the likelihood
    yl = pm.Normal('yl', mu=mu, tau=tau, observed=z)

    # Generate a MCMC chain
    start = pm.find_MAP()
    steps = pm.Metropolis()
    trace = pm.sample(20000, steps, start=start, progressbar=True)

# EXAMINE THE RESULTS
burnin = 2000
thin = 50

# Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

# Check for mixing and autocorrelation
#pm.autocorrplot(trace[burnin::thin], vars=model.unobserved_RVs[:-1])

## Plot KDE and sampled values for each parameter.
pm.traceplot(trace[burnin::thin])
Example 25
# Use PyMC3 to construct a model context
basic_model = pymc3.Model()
with basic_model:
    # Define our prior belief about the fairness
    # of the coin using a Beta distribution
    theta = pymc3.Beta("theta", alpha=alpha, beta=beta)

    # Define the Binomial likelihood function
    y = pymc3.Binomial("y", n=n, p=theta, observed=z)

    # Carry out the MCMC analysis using the Metropolis algorithm
    # Use Maximum A Posteriori (MAP) optimisation as initial value for MCMC
    start = pymc3.find_MAP()

    # Use the Metropolis algorithm (as opposed to NUTS or HMC, etc.)
    step = pymc3.Metropolis()

    # Calculate the trace
    trace = pymc3.sample(iterations,
                         step,
                         start,
                         random_seed=1,
                         progressbar=True)

# Plot the posterior histogram from MCMC analysis
bins = 50
plt.hist(trace["theta"],
         bins,
         histtype="step",
         normed=True,
         label="Posterior (MCMC)",
Example 26
        y = y + [1] * heads + [0] * (flips-heads)
        coin = coin + [i] * flips


# Specify the model in PyMC
with pm.Model() as model:
    # define the hyperparameters
    mu = pm.Beta('mu', 2, 2)
    kappa = pm.Gamma('kappa', 1, 0.1)
    # define the prior
    theta = pm.Beta('theta', mu * kappa, (1 - mu) * kappa, shape=len(N))
    # define the likelihood
    y = pm.Bernoulli('y', p=theta[coin], observed=y)
    # Generate a MCMC chain
    start = pm.find_MAP()  # find a reasonable starting point.
    step1 = pm.Metropolis([theta, mu])
    step2 = pm.NUTS([kappa])
    trace = pm.sample(10000, [step1, step2], start=start, random_seed=(123), progressbar=False)

## Check the results.
burnin = 2000  # posterior samples to discard
thin = 10  # posterior samples to discard

## Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

## Check for mixing and autocorrelation
pm.autocorrplot(trace[burnin::thin], vars =[mu, kappa])
#pm.autocorrplot(trace, vars =[mu, kappa])
Example 27
import pymc3 as pm
import numpy as np
import pandas as pd

x = np.array([1.1, 1.9, 2.3, 1.8])

model = pm.Model()
with model:
    # priors
    mu = pm.Normal('mu', mu=0, sd=100)  # or tau=0.001 == 1/100**2
    sigma = pm.Uniform('sigma', lower=.1, upper=10)
    # data come from a Gaussian
    x = pm.Normal('x', mu=mu, sd=sigma, observed=x)
    # instantiate sampler
    stepFunc = pm.Metropolis()  # or try pm.NUTS()
    # draw posterior samples (in 4 parallel running chains)
    Nsample = 1000
    Nchains = 4
    traces = pm.sample(Nsample, step=stepFunc, njobs=Nchains)

plotVars = ('mu', 'sigma')
axs = pm.traceplot(traces, vars=plotVars, combined=False)
# plot joint posterior samples
tstr = 'Joint posterior samples'
post = np.vstack([traces['mu'], traces['sigma']])
post = post.transpose()
df = pd.DataFrame(post, columns=plotVars)
ax = df.plot(kind='scatter',
             x=plotVars[0],
             y=plotVars[1],
Example 28
    kappa1 = pm.Gamma('kappa1', alpha=shape_Gamma, beta=rate_Gamma, shape=n_cond)
    a_Beta1 = mu[cond_of_subj] * kappa1[cond_of_subj]
    b_Beta1 = (1 - mu[cond_of_subj]) * kappa1[cond_of_subj]

    #Prior on theta
    theta0 = pm.Beta('theta0', a_Beta0, b_Beta0, shape=n_subj)
    theta1 = pm.Beta('theta1', a_Beta1, b_Beta1, shape=n_subj)
    # if model_index == 0 then sample from theta1 else sample from theta0
    theta = pm.switch(pm.eq(model_index, 0), theta1, theta0)

    # Likelihood:
    y = pm.Binomial('y', p=theta, n=n_trl_of_subj, observed=n_corr_of_subj)

    # Sampling
    step1 = pm.Metropolis([kappa0, kappa1, mu])
    step2 = pm.NUTS([theta0, theta1])
    step3 = pm.ElemwiseCategorical(vars=[model_index],values=[0,1])
    trace = pm.sample(5000, step=[step1, step2, step3], progressbar=False)


# EXAMINE THE RESULTS.
burnin = 500
pm.traceplot(trace)

model_idx_sample = trace['model_index'][burnin:]

pM1 = sum(model_idx_sample == 1) / len(model_idx_sample)
pM2 = 1 - pM1

plt.figure(figsize=(15, 15))
Example 29
    G = pm.Normal('G',
                  mu=np.zeros(num_states - 1),
                  sd=np.ones(num_states - 1) * 10000.,
                  shape=(num_states - 1))

    states = CommitmentProcess('states',
                               PI=PI,
                               Q=Q,
                               renewal_mask=renewal_mask,
                               num_states=num_states,
                               shape=(num_custs, obs_len),
                               testval=states_test_val)
    usage = UsageProcess('usage',
                         alpha=A,
                         th0=th0,
                         G=G,
                         states=states,
                         num_states=num_states,
                         shape=(num_custs),
                         observed=observed_usage)

    start = pm.find_MAP(method='Powell')
    step1 = pm.Metropolis(vars=[r, PI, Q, A, G, th0, usage])
    step2 = pm.CategoricalGibbsMetropolis(vars=[states])
    trace = pm.sample(draws, start=start, step=[step1, step2], chains=chains)

print('saving to ' + args.output_dir)
pm.backends.ndarray.save_trace(trace,
                               directory=args.output_dir,
                               overwrite=True)
Example 30
    order_means_potential = pm.Potential(
        'order_means_potential',
        tt.switch(means[1] - means[0] < 0, -np.inf, 0) +
        tt.switch(means[2] - means[1] < 0, -np.inf, 0))

    # measurement error
    sd = pm.Uniform('sd', lower=0, upper=20)

    # latent cluster of each observation
    category = pm.Categorical('category', p=p, shape=ndata)

    # likelihood for each observed value
    points = pm.Normal('obs', mu=means[category], sd=sd, observed=data)

with model:
    step1 = pm.Metropolis(vars=[p, sd, means])
    step2 = pm.ElemwiseCategorical(vars=[category], values=[0, 1, 2])
    tr = pm.sample(10000, step=[step1, step2])

pm.plots.traceplot(tr, ['p', 'sd', 'means'])
pm.plots.traceplot(tr[5000::5], ['p', 'sd', 'means'])

#Sampling of cluster for individual data point
i = 0
plt.plot(tr['category'][5000::5, i], drawstyle='steps-mid')
plt.axis(ymin=-.1, ymax=2.1)


def cluster_posterior(i=0):
    print('true cluster:', v[i])
    print('  data value:', np.round(data[i], 2))