Example #1
    def _gamma_mix(self, model, z):
        with model:
            logger.info("Using tau_g_alpha: {}".format(self.tau_g_alpha))
            tau_g = pm.InverseGamma("tau_g",
                                    alpha=self.tau_g_alpha,
                                    beta=1.,
                                    shape=self.n_states)

            logger.info("Using mean_g: {}".format(self.gamma_means))
            if self.n_states == 2:
                logger.info("Building two-state model")
                mean_g = pm.Normal("mu_g",
                                   mu=self.gamma_means,
                                   sd=1,
                                   shape=self.n_states)
                pm.Potential("m_opot",
                             var=tt.switch(mean_g[1] - mean_g[0] < 0., -np.inf,
                                           0.))
            else:
                logger.info("Building three-state model")
                mean_g = pm.Normal("mu_g",
                                   mu=self.gamma_means,
                                   sd=1,
                                   shape=self.n_states)
                pm.Potential(
                    'm_opot',
                    tt.switch(mean_g[1] - mean_g[0] < 0, -np.inf, 0) +
                    tt.switch(mean_g[2] - mean_g[1] < 0, -np.inf, 0))

            gamma = pm.Normal("gamma", mean_g[z], tau_g[z], shape=self.n_genes)

        return tau_g, mean_g, gamma
    def _define_model(self):

        self.model = pm.Model()
        with self.model:


            p = pm.Dirichlet('p', a=np.array([1., 1., 1.]), shape=self.number_of_hidden_states)
            p_min_potential = pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))

            means = pm.Normal('means', mu=[0, 0, 0], sd=2.0, shape=self.number_of_hidden_states)

            # break symmetry
            order_means_potential = pm.Potential('order_means_potential',
                                                 tt.switch(means[1] - means[0] < 0, -np.inf, 0)
                                                 + tt.switch(means[2] - means[1] < 0, -np.inf, 0))

            sd = pm.HalfCauchy('sd', beta=2, shape=self.number_of_hidden_states)
            category = pm.Categorical('category',
                                      p=p,
                                      shape=self.number_of_data)

            points = pm.Normal('obs',
                               mu=means[category],
                               sd=sd[category],
                               observed=self.data)
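
The mixture above reads its dimensions and observations from instance attributes, so it cannot be run on its own. Below is a minimal, self-contained sketch of the same symmetry-breaking pattern with synthetic data; the three component locations and the sample sizes are made up for illustration.

import numpy as np
import pymc3 as pm
import theano.tensor as tt

# synthetic draws from three well-separated components
data = np.concatenate([np.random.normal(m, 0.5, 200) for m in (-3.0, 0.0, 3.0)])

with pm.Model() as toy_mixture:
    p = pm.Dirichlet('p', a=np.ones(3))
    pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))
    means = pm.Normal('means', mu=[0, 0, 0], sd=2.0, shape=3)
    # order the component means to break label-switching symmetry
    pm.Potential('order_means_potential',
                 tt.switch(means[1] - means[0] < 0, -np.inf, 0)
                 + tt.switch(means[2] - means[1] < 0, -np.inf, 0))
    sd = pm.HalfCauchy('sd', beta=2, shape=3)
    category = pm.Categorical('category', p=p, shape=len(data))
    pm.Normal('obs', mu=means[category], sd=sd[category], observed=data)
    trace = pm.sample(500, tune=1000)
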
def build_biallelic_model3(g, n, s):
    # EXPERIMENTAL: Observations overdispersed as a BetaBinom w/ concentrations
    # 10.
    a = 2

    with pm.Model() as model:
        # Fraction
        pi = pm.Dirichlet(
            'pi',
            a=np.ones(s),
            shape=(n, s),
            transform=stick_breaking,
        )
        pi_hyper = pm.Data('pi_hyper', value=0.0)
        pm.Potential('heterogeneity_penalty',
                     -(pm.math.sqrt(pi).sum(0).sum()**2) * pi_hyper)

        rho_hyper = pm.Data('rho_hyper', value=0.0)
        pm.Potential('diversity_penalty',
                     -(pm.math.sqrt(pi.sum(0)).sum()**2) * rho_hyper)

        # Genotype
        gamma_ = pm.Uniform('gamma_', 0, 1, shape=(g * s, 1))
        gamma = pm.Deterministic(
            'gamma',
            (pm.math.concatenate([gamma_, 1 - gamma_], axis=1).reshape(
                (g, s, a))))
        gamma_hyper = pm.Data('gamma_hyper', value=0.0)
        pm.Potential(
            'ambiguity_penalty',
            -(pm.math.sqrt(gamma).sum(2)**2).sum(0).sum(0) * gamma_hyper)

        # Product of fraction and genotype
        true_p = pm.Deterministic('true_p', pm.math.dot(pi, gamma))

        # Sequencing error
        epsilon_hyper = pm.Data('epsilon_hyper', value=100)
        epsilon = pm.Beta('epsilon', alpha=2, beta=epsilon_hyper, shape=n)
        epsilon_ = epsilon.reshape((n, 1, 1))
        err_base_prob = tt.ones((n, g, a)) / a
        p_with_error = (true_p * (1 - epsilon_)) + (err_base_prob * epsilon_)

        # Observation
        _p = p_with_error.reshape((-1, a))[:, 0]
        # Overdispersion term
        # alpha = pm.Gamma('alpha', mu=100, sigma=5)
        # TODO: Figure out how to also fit this term.
        # FIXME: Do I want the default to be a valid value?
        #  Realistic or close to asymptotic?
        alpha = pm.Data('alpha', value=1000)

        observed = pm.Data('observed', value=np.empty((g * n, a)))
        pm.BetaBinomial('data',
                        alpha=_p * alpha,
                        beta=(1 - _p) * alpha,
                        n=observed.reshape((-1, a)).sum(1),
                        observed=observed[:, 0])

    return model
Example #4
def fit_adj_pass_model(successes, attempts):
    ## inputs are two lists in the form:
    ##       successes = [successful long passes, total successful passes]
    ##       attempts = [attempted long passes, total attempted passes]
    ## returns:
    ##      sl, a numpy array of shape (6000, N) containing 6000 posterior samples of success probabilities (N is the number of players in the
    ##      original data frame who have registered non-zero expected successes)
    ##      sb, an empty list
    ##      kk, boolean indicating which players have actually registered non-zero expected successes
    ##      'adj_pass', character string indicating the model type.
    import numpy as np
    import pymc3 as pm
    import pymc3.distributions.transforms as tr
    import theano.tensor as tt
    LonCmp = successes[0]
    TotCmp = successes[1]
    LonAtt = attempts[0]
    TotAtt = attempts[1]
    kk = (LonCmp > 0) & np.isfinite(LonAtt)
    LonCmp = LonCmp[kk]
    LonAtt = LonAtt[kk]
    TotCmp = TotCmp[kk]
    TotAtt = TotAtt[kk]
    ShCmp = TotCmp - LonCmp
    ShAtt = TotAtt - LonAtt
    average_long_tendency = np.mean(LonAtt / TotAtt)
    N = np.sum(kk)

    def logp_ab(value):
        ''' prior density'''
        return tt.log(tt.pow(tt.sum(value), -5 / 2))

    with pm.Model() as model:
        # Uninformative prior for alpha and beta
        ab_short = pm.HalfFlat('ab_short',
                               shape=2,
                               testval=np.asarray([1., 1.]))
        ab_long = pm.HalfFlat('ab_long',
                              shape=2,
                              testval=np.asarray([1., 1.]))
        pm.Potential('p(a_s, b_s)', logp_ab(ab_short))
        pm.Potential('p(a_l, b_l)', logp_ab(ab_long))

        lambda_short = pm.Beta('lambda_s', alpha=ab_short[0], beta=ab_short[1], shape=N)
        lambda_long = pm.Beta('lambda_l', alpha=ab_long[0], beta=ab_long[1], shape=N)

        y_short = pm.Binomial('y_s', p=lambda_short, observed=ShCmp, n=ShAtt)
        y_long = pm.Binomial('y_l', p=lambda_short * lambda_long, observed=LonCmp, n=LonAtt)
        approx = pm.fit(n=30000)
    posterior = approx.sample(6000)  # one joint draw so lambda_s and lambda_l stay paired
    s_sh = posterior['lambda_s']
    s_lo = posterior['lambda_l']
    sl = average_long_tendency * s_lo + (1 - average_long_tendency) * s_sh
    return [sl, [], kk, 'adj_pass']
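
A minimal usage sketch for fit_adj_pass_model; the pass totals are made-up numbers, and the inputs are assumed to be NumPy arrays so that the boolean mask kk can index them.

import numpy as np

long_completed = np.array([12., 40., 0.])
total_completed = np.array([300., 520., 80.])
long_attempted = np.array([25., 70., 8.])
total_attempted = np.array([350., 600., 100.])

sl, sb, kk, label = fit_adj_pass_model([long_completed, total_completed],
                                       [long_attempted, total_attempted])
# sl has shape (6000, kk.sum()); the player with zero completed long passes is dropped.
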
def build_biallelic_model(g, n, s):
    a = 2

    with pm.Model() as model:
        # Fraction
        pi = pm.Dirichlet(
            'pi',
            a=np.ones(s),
            shape=(n, s),
            transform=stick_breaking,
        )
        pi_hyper = pm.Data('pi_hyper', value=0.0)
        pm.Potential('heterogeneity_penalty',
                     -(pm.math.sqrt(pi).sum(0).sum()**2) * pi_hyper)

        rho_hyper = pm.Data('rho_hyper', value=0.0)
        pm.Potential('diversity_penalty',
                     -(pm.math.sqrt(pi.sum(0)).sum()**2) * rho_hyper)

        # Genotype
        gamma_ = pm.Uniform('gamma_', 0, 1, shape=(g * s, 1))
        gamma = pm.Deterministic(
            'gamma',
            (pm.math.concatenate([gamma_, 1 - gamma_], axis=1).reshape(
                (g, s, a))))
        gamma_hyper = pm.Data('gamma_hyper', value=0.0)
        pm.Potential(
            'ambiguity_penalty',
            -(pm.math.sqrt(gamma).sum(2)**2).sum(0).sum(0) * gamma_hyper)

        # Product of fraction and genotype
        true_p = pm.Deterministic('true_p', pm.math.dot(pi, gamma))

        # Sequencing error
        epsilon_hyper = pm.Data('epsilon_hyper', value=100)
        epsilon = pm.Beta('epsilon', alpha=2, beta=epsilon_hyper, shape=n)
        epsilon_ = epsilon.reshape((n, 1, 1))
        err_base_prob = tt.ones((n, g, a)) / a
        p_with_error = (true_p * (1 - epsilon_)) + (err_base_prob * epsilon_)

        # Observation
        observed = pm.Data('observed', value=np.empty((g * n, a)))
        pm.Binomial('data',
                    p=p_with_error.reshape((-1, a))[:, 0],
                    n=observed.reshape((-1, a)).sum(1),
                    observed=observed[:, 0])

    return model
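
A hedged sketch of how the pm.Data placeholders above are presumably filled before sampling; the counts are synthetic, and the stick_breaking transform referenced in the model is assumed to be defined elsewhere in the module.

g, n, s = 50, 10, 3
model = build_biallelic_model(g, n, s)

# synthetic allele counts with shape (g * n, 2): column 0 is the focal allele,
# the row sum is the total coverage used as the Binomial n
counts = np.random.randint(0, 20, size=(g * n, 2)).astype(float)

with model:
    pm.set_data({"observed": counts})
    trace = pm.sample(500, tune=500)
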
Example #6
def gp_fit(t, y, yerr, t_grid, integrated=False, exp_time=60.):
    # optimize kernel hyperparameters and return fit + predictions
    with pm.Model() as model:
        logS0 = pm.Normal("logS0", mu=0.4, sd=5.0, testval=np.log(np.var(y)))
        logw0 = pm.Normal("logw0", mu=-3.9, sd=0.1)
        logQ = pm.Normal("logQ", mu=3.5, sd=5.0)

        # Set up the kernel and GP
        kernel = terms.SHOTerm(log_S0=logS0, log_w0=logw0, log_Q=logQ)
        if integrated:
            kernel_int = terms.IntegratedTerm(kernel, exp_time)
            gp = GP(kernel_int, t, yerr**2)
        else:
            gp = GP(kernel, t, yerr**2)

        # Add a custom "potential" (log probability function) with the GP likelihood
        pm.Potential("gp", gp.log_likelihood(y))

    with model:
        map_soln = xo.optimize(start=model.test_point)
        mu, var = xo.eval_in_model(gp.predict(t_grid, return_var=True),
                                   map_soln)
        sd = np.sqrt(var)
        y_pred = xo.eval_in_model(gp.predict(t), map_soln)

    return map_soln, mu, sd, y_pred
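
A usage sketch for gp_fit with synthetic data; numpy, pymc3, exoplanet (xo), and its celerite terms/GP classes are assumed to be imported as in the function above.

t = np.sort(np.random.uniform(0, 10, 200))
y = np.sin(2 * np.pi * t / 3.0) + 0.1 * np.random.randn(t.size)
yerr = np.full_like(t, 0.1)
t_grid = np.linspace(0, 10, 500)

map_soln, mu, sd, y_pred = gp_fit(t, y, yerr, t_grid, integrated=False)
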
    def test_potential(self):
        with pm.Model():
            x = pm.Normal("x", 0.0, 1.0)
            pm.Potential("z", logpt(pm.Normal.dist(x, 1.0), np.random.randn(10)))
            inference_data = pm.sample(100, chains=2, return_inferencedata=True)

        assert inference_data
Example #8
def DUMvNormal(*args, **kwargs):
    v = pm.MvNormal(*args, **kwargs)
    global BOUND
    pm.Potential("bound {}".format(BOUND), tt.switch(multiply.reduce(\
        multiply(0 <= v, v <= 1)), 0, -inf))
    BOUND += 1
    return v
Example #9
    def fit(self, X, B, T):
        n, k = X.shape
        with pymc3.Model() as m:
            beta_sd = pymc3.Exponential(
                'beta_sd', 1.0)  # Weak prior for the regression coefficients
            beta = pymc3.Normal('beta', mu=0, sd=beta_sd,
                                shape=(k, ))  # Regression coefficients
            c = sigmoid(dot(X, beta))  # Conversion rates for each example
            k = pymc3.Lognormal('k', mu=0, sd=1.0)  # Weak prior around k=1
            lambd = pymc3.Exponential('lambd', 0.1)  # Weak prior

            # PDF of Weibull: k * lambda * (x * lambda)^(k-1) * exp(-(t * lambda)^k)
            LL_observed = log(c) + log(k) + log(
                lambd) + (k - 1) * (log(T) + log(lambd)) - (T * lambd)**k
            # CDF of Weibull: 1 - exp(-(t * lambda)^k)
            LL_censored = log((1 - c) + c * exp(-(T * lambd)**k))

            # We need to implement the likelihood using pymc3.Potential (custom likelihood)
            # https://github.com/pymc-devs/pymc3/issues/826
            logp = B * LL_observed + (1 - B) * LL_censored
            logpvar = pymc3.Potential('logpvar', logp.sum())

            self.trace = pymc3.sample(draws=500,
                                      tune=500,
                                      discard_tuned_samples=True,
                                      njobs=1)
            print('done')
        print('done 2')
Example #10
def build_model(X, treatment_start, treatment_observations):
    time_seen = pd.to_datetime(treatment_start) + pd.DateOffset(
        treatment_observations - 1)
    y = shared(X[:time_seen].values)
    y_switch = shared(X[:time_seen].index < treatment_start)
    with pm.Model() as i1ma1:
        σ = pm.HalfCauchy('σ', beta=2.)
        θ = pm.Normal('θ', 0., sd=2.)
        β = pm.Normal('β', 0., sd=2.)

        y_adj = tt.switch(y_switch, y, y - tt.dot(y, β))

        # ARIMA (0, 1, 1)
        # ---------------
        # (1 - B) y[t] = (1 - θB) ε[t]
        # y[t] - y[t-1] = ε[t] - θ * ε[t-1]
        # ε[t] = y[t] - y[t-1] - θ * ε[t-1]
        def calc_next(y_lag1, y_lag0, ε, θ):
            return y_lag0 - y_lag1 - θ * ε

        # Initial noise guess -- let's just seed with 0
        ε0 = tt.zeros_like(y_adj[0])

        ε, _ = scan(fn=calc_next,
                    sequences=dict(input=y_adj, taps=[-1, 0]),
                    outputs_info=[ε0],
                    non_sequences=[θ])

        pm.Potential('like', pm.Normal.dist(0, sd=σ).logp(ε))
    return i1ma1
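
A usage sketch for the intervention model above; X is assumed to be a pandas Series with a DatetimeIndex, as implied by the slicing on pd.to_datetime(treatment_start), and the series values are synthetic.

idx = pd.date_range("2020-01-01", periods=120, freq="D")
X = pd.Series(np.cumsum(np.random.randn(120)), index=idx)

i1ma1 = build_model(X, treatment_start="2020-03-01", treatment_observations=30)
with i1ma1:
    trace = pm.sample(500, tune=500)
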
Example #11
    def build_model(self, time_series):
        with pm.Model() as arma_model:
            self.scaler = MinMaxScaler((1, np.max(time_series)))
            time_series = self.scaler.fit_transform(time_series)
            time_series = np.log(time_series[1:]) - np.log(time_series[:-1])
            time_series = np.squeeze(time_series)
            y = shared(time_series)
            sigma = pm.HalfCauchy('sigma', 5.)
            theta = pm.Normal('theta', 0., sd=2.)
            phi = pm.Normal('phi', 0., sd=2.)
            mu = pm.Normal('mu', 0., sd=10.)

            err0 = y[0] - (mu + phi * mu)

            def calc_next(last_y, this_y, err, mu, phi, theta):
                nu_t = mu + phi * last_y + theta * err
                return this_y - nu_t

            err, _ = scan(fn=calc_next,
                          sequences=dict(input=y, taps=[-1, 0]),
                          outputs_info=[err0],
                          non_sequences=[mu, phi, theta])

            pm.Potential('like', pm.Normal.dist(0, sd=sigma).logp(err))

        return arma_model
Example #12
def main():
    n = 4
    mu1 = np.ones(n) * (1. / 2)

    with pm.Model() as ATMIP_test:
        X = pm.Uniform('X',
                       shape=n,
                       lower=-2. * np.ones_like(mu1),
                       upper=2. * np.ones_like(mu1),
                       testval=-1. * np.ones_like(mu1))
        kd4 = pm.Lognormal('kd4', mu=np.log(0.3), sd=1)
        #        k_d4 = pm.Lognormal('k_d4', mu=np.log(6E-3), sd=9)
        #        kSOCSon = pm.Lognormal('kSOCSon', mu=np.log(1E-6), sd = 2)
        #        kpa = pm.Lognormal('kpa', mu=np.log(1E-6), sd = 2)
        #        R1 = pm.Uniform('R1', lower=900, upper=5000)
        #        R2 = pm.Uniform('R2', lower=900, upper=5000)
        #        gamma = pm.Uniform('gamma', lower=2, upper=30)

        llk = pm.Potential('llk', two_gaussians(X, kd4))
    with ATMIP_test:
        trace = pm.sample(100, chains=50, step=pm.SMC())
        plt.figure()
        pm.traceplot(trace)
        plt.savefig("mc_testing.pdf")
        s = pm.stats.summary(trace)
        s.to_csv('mcmc_parameter_summary.csv')
Example #13
    def mixture_model_v0(x, y, std, xsite, ysite):

        with pm.Model() as mixture_model_v0:

            #Prior
            P = pm.Uniform('P', lower=0, upper=1)

            xc = (xsite[0] + xsite[1]) / 2  #x center of the site
            yc = (ysite[0] + ysite[1]) / 2  #y center of the site

            #Photons scattered from the atoms are Gaussian distributed
            atom_x = pm.Normal.dist(mu=xc, sigma=std).logp(x)
            atom_y = pm.Normal.dist(mu=yc, sigma=std).logp(y)
            atom = atom_x + atom_y

            #Photons from the camera background are uniform distributed
            background_x = pm.Uniform.dist(lower=xsite[0],
                                           upper=xsite[1]).logp(x)
            background_y = pm.Uniform.dist(lower=ysite[0],
                                           upper=ysite[1]).logp(y)
            background = background_x + background_y

            #Log-likelihood
            log_like = tt.log(
                (P * tt.exp(atom) + (1 - P) * tt.exp(background)))

            pm.Potential('logp', log_like.sum())

        #MAP value
        map_estimate = pm.find_MAP(model=mixture_model_v0)
        P_value = map_estimate["P"]

        return P_value
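
A usage sketch treating mixture_model_v0 as a plain function; the photon coordinates are synthetic and the site bounds are made up.

xsite, ysite = (0.0, 1.0), (0.0, 1.0)
x = np.random.normal(0.5, 0.1, size=200)
y = np.random.normal(0.5, 0.1, size=200)

P_value = mixture_model_v0(x, y, std=0.1, xsite=xsite, ysite=ysite)
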
def main():
    mu_true = 5.0
    sigma_true = 1.0
    data = np.random.normal(loc=mu_true, scale=sigma_true, size=10000)
    loglike = Loglike(data)
    #utt.verify_grad(loglike, [np.array([3.0, 2.0])])
    # verify_grad passes with no errors
    with pm.Model() as model:
        mu = pm.Normal('mu', mu=4.0, sigma=2.0, testval=4.0)
        sigma = pm.HalfNormal('sigma', sigma=5.0, testval=2.0)
        theta = tt.as_tensor_variable([mu, sigma])
        like = pm.Potential('like', loglike(theta))
    with model:
        trace = pm.sample()
        print(pm.summary(trace).to_string())

        # plot the traces
        _ = az.plot_trace(trace, lines={"mu": mu_true, "sigma": sigma_true})

        # put the chains in an array (for later!)
        samples = np.vstack((trace["mu"], trace["sigma"])).T

        # corner plot
        fig = corner.corner(samples,
                            labels=[r"$mu$", r"$\sigma$"],
                            truths=[mu_true, sigma_true])

        plt.show()
    def fit_mcmc(self, sigma_obs_prior=10, sigma_trend_prior=1e-4,  \
                       sigma_seas_prior=1e-2, sigma_ar_prior=1e-4, \
                       nsam=6000, warmup=1000, chains=4, cores=4):
        with mc.Model() as model:
            sigma_obs = mc.HalfNormal("sigma_obs", sd=sigma_obs_prior)
            sigma_trend = mc.HalfNormal("sigma_trend", sd=sigma_trend_prior)
            sigma_seas = mc.HalfNormal("sigma_seas", sd=sigma_seas_prior)
            sigma_ar = mc.HalfNormal("sigma_ar", sd=sigma_ar_prior)
            rho = mc.Uniform("rho", lower=0, upper=1)

            @theano.compile.ops.as_op(itypes=[tt.dscalar, tt.dscalar, tt.dscalar, tt.dscalar, tt.dscalar], \
                                      otypes=[tt.dvector])
            def loglikelihood(o, l, m, n, s):
                logp = self.mod.loglike([o, l, m, n, s], transformed=True)
                logp = np.array(logp).reshape((1, ))
                return logp

            lgp = mc.Potential(
                'lgp',
                loglikelihood(sigma_obs, sigma_trend, sigma_seas, sigma_ar,
                              rho))
            self.method = mc.step_methods.metropolis.Metropolis()
            self.trace = mc.sample(nsam,
                                   tune=warmup,
                                   chains=chains,
                                   cores=cores,
                                   step=self.method)
        # posteriori sampled parameters
        self.par_names = [
            'sigma_obs', 'sigma_trend', 'sigma_seas', 'sigma_ar', 'rho'
        ]
        self.post = pd.DataFrame(columns=self.par_names)
        for col in self.par_names:
            self.post[col] = self.trace.get_values(col)
        return self.post
Example #16
    def setup_class(self):
        super(TestSMC, self).setup_class()
        self.test_folder = mkdtemp(prefix='ATMIP_TEST')

        self.samples = 1500
        self.chains = 200
        n = 4
        mu1 = np.ones(n) * (1. / 2)
        mu2 = -mu1

        stdev = 0.1
        sigma = np.power(stdev, 2) * np.eye(n)
        isigma = np.linalg.inv(sigma)
        dsigma = np.linalg.det(sigma)

        w1 = stdev
        w2 = (1 - stdev)

        def two_gaussians(x):
            log_like1 = - 0.5 * n * tt.log(2 * np.pi) \
                        - 0.5 * tt.log(dsigma) \
                        - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)
            log_like2 = - 0.5 * n * tt.log(2 * np.pi) \
                        - 0.5 * tt.log(dsigma) \
                        - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
            return tt.log(w1 * tt.exp(log_like1) + w2 * tt.exp(log_like2))

        with pm.Model() as self.ATMIP_test:
            X = pm.Uniform('X', lower=-2, upper=2., shape=n)
            llk = pm.Potential('muh', two_gaussians(X))

        self.muref = mu1
Example #17
    def setup_class(self):
        super().setup_class()
        self.samples = 1000
        n = 4
        mu1 = np.ones(n) * (1.0 / 2)
        mu2 = -mu1

        stdev = 0.1
        sigma = np.power(stdev, 2) * np.eye(n)
        isigma = np.linalg.inv(sigma)
        dsigma = np.linalg.det(sigma)

        w1 = stdev
        w2 = 1 - stdev

        def two_gaussians(x):
            log_like1 = (-0.5 * n * tt.log(2 * np.pi) - 0.5 * tt.log(dsigma) -
                         0.5 * (x - mu1).T.dot(isigma).dot(x - mu1))
            log_like2 = (-0.5 * n * tt.log(2 * np.pi) - 0.5 * tt.log(dsigma) -
                         0.5 * (x - mu2).T.dot(isigma).dot(x - mu2))
            return tt.log(w1 * tt.exp(log_like1) + w2 * tt.exp(log_like2))

        with pm.Model() as self.SMC_test:
            X = pm.Uniform("X", lower=-2, upper=2.0, shape=n)
            llk = pm.Potential("muh", two_gaussians(X))

        self.muref = mu1
Example #18
def mixture_model(random_seed=1234):
    """Sample mixture model to use in benchmarks"""
    np.random.seed(random_seed)
    size = 1000
    w_true = np.array([0.35, 0.4, 0.25])
    mu_true = np.array([0.0, 2.0, 5.0])
    sigma = np.array([0.5, 0.5, 1.0])
    component = np.random.choice(mu_true.size, size=size, p=w_true)
    x = np.random.normal(mu_true[component], sigma[component], size=size)

    with pm.Model() as model:
        w = pm.Dirichlet("w", a=np.ones_like(w_true))
        mu = pm.Normal("mu", mu=0.0, sd=10.0, shape=w_true.shape)
        enforce_order = pm.Potential(
            "enforce_order",
            aet.switch(mu[0] - mu[1] <= 0, 0.0, -np.inf) +
            aet.switch(mu[1] - mu[2] <= 0, 0.0, -np.inf),
        )
        tau = pm.Gamma("tau", alpha=1.0, beta=1.0, shape=w_true.shape)
        pm.NormalMixture("x_obs", w=w, mu=mu, tau=tau, observed=x)

    # Initialization can be poorly specified, this is a hack to make it work
    start = {
        "mu": mu_true.copy(),
        "tau_log__": np.log(1.0 / sigma**2),
        "w_stickbreaking__": np.array([-0.03, 0.44]),
    }
    return model, start
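
A usage sketch: sample the benchmark model from the hand-tuned start point it returns (the pymc/numpy imports are assumed as in the example above).

model, start = mixture_model()
with model:
    trace = pm.sample(500, tune=500, start=start)
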
Example #19
def group_static_ucb_mes_model(X, explore_param_alpha=.01, explore_param_beta=.01,
                     temperature_alpha=1., temperature_beta=10., method_alpha=.001, maxk=10, samples=200):
    
    X = X.copy().transpose((2,1,3,0))
    nparticipants = X.shape[3]
    nchoices = X.shape[2]
    ntrials = X.shape[1]
    actions = theano.shared(X[-1])
    mean = theano.shared(X[0])
    var = theano.shared(X[1])
    mes = theano.shared(X[3])
    #mes = theano.shared(X[4])
    random_likelihood = theano.shared((1./nchoices)*np.ones(shape=(ntrials, maxk, nchoices, nparticipants)))
    with pm.Model() as model:
    
        explore_param = pm.Gamma('var_param', explore_param_alpha, explore_param_beta, shape=maxk)
        temperature = pm.Gamma('temperature', temperature_alpha, temperature_beta, shape=maxk)
        method = pm.Dirichlet('method', np.ones(3)*method_alpha, shape=(maxk,3))
        
        alpha = pm.Gamma('alpha', 10**-10., 10**-10.)
        beta = pm.Beta('beta', 1., alpha, shape=maxk)
        weights = pm.Deterministic('w', stick_breaking(beta))
        assignments = pm.Categorical('assignments', weights, shape=nparticipants)
    
        obs = pm.Potential('obs',
                           sparse_group_static_ucb_mes_likelihood(
                               actions, mean, var, mes, random_likelihood, method,
                               explore_param, temperature, assignments, maxk))
        trace = pm.sample(samples, njobs=4)
        return trace
def get_pm3_model(xs: np.ndarray, hyper_params: Dict, verbose: int):
    """Get PyMC3 Model object of cyclic model.

    :type xs: np.ndarray, shape=(n_samples, 2)
    :param xs: samples.
    :type hyper_params: dict
    :param hyper_params: Hyperparameters of the model.
    :type verbose: int
    :param verbose: the verbosity of the output log.
    :return: pm.Model
    """
    n_samples = xs.shape[0]

    # Standardize samples
    xs_ = (xs - xs.mean(axis=0)) / xs.std(axis=0)

    with pm.Model() as model:

        # Regression coefficients
        b_12, b_21 = _get_reg_coefs(hyper_params)

        # Noise variance
        h1, h2 = _noise_variance(hyper_params)

        # Individual specific effects
        L_cov = _get_L_cov(hyper_params)
        u1s, u2s = _indvdl_t(hyper_params, n_samples, L_cov)

        # Causal model
        xs_loglike = _causal_model_loglike(hyper_params, u1s, u2s, b_12, b_21,
                                           h1, h2, n_samples)
        xs_obs = pm.Potential('xs', xs_loglike(xs_))  # @fixme

    return model
Example #21
    def granulation_model(self):
        peak = self.period_prior()
        x = self.lc.lcf.time
        y = self.lc.lcf.flux
        yerr = self.lc.lcf.flux_err
        with pm.Model() as model:
            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            logs2 = pm.Normal("logs2", mu=2*np.log(np.min(sigmaclip(yerr)[0])), sd=1.0)

            logw0 = pm.Bound(pm.Normal, lower=-0.5, upper=np.log(2 * np.pi / self.min_period))("logw0", mu=0.0, sd=5)
            logSw4 = pm.Normal("logSw4", mu=np.log(np.var(y)), sd=5)
            kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4, log_w0=logw0, Q=1 / np.sqrt(2))

            #GP model
            gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2))

            # Compute the Gaussian Process likelihood and add it to the
            # PyMC3 model as a "potential"
            pm.Potential("loglike", gp.log_likelihood(y - mean))

            # Compute the mean model prediction for plotting purposes
            pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            map_soln = xo.optimize(start=model.test_point)
        return model, map_soln
Example #22
def VINormal(dim, const_str, const_fx, K, nfit=30000):
    """\
    Normal (full-rank) sampling, fit with ADVI to a
    high-potential probability distribution



    :input dim:       The dimensionality
    :input const_str: Constraint strings; used to define potentials
    :input const_fx:  Constraint callables, included for API compatibility
    :input K:         Number of points to sample
    :input nfit:      Number of gradient iterations for variational inference

    :returns: A set of points X drawn from N(μ, Σ), where the parameters are fit
              by variational inference to match the distribution formed by the
              potentials -c*g_i, for c=7500
    """
    with pm.Model() as mod:
        x = pm.Uniform('x', shape=dim)
        for i, const in enumerate(const_str):
            cname = 'g%d' % i
            g = pm.Deterministic(cname, eval(const, {'__builtins__': None}, {'x': x } ))
            pname = '%s_pot' % cname
            pm.Potential(pname, tt.switch(tt.lt(g, 0), 7500*g, 0))
        fit_res = pm.fit(nfit, method='fullrank_advi', obj_n_mc=3)
        trace = fit_res.sample(K)
    return trace['x']
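
A usage sketch for VINormal; the constraint string is a made-up example, written as an expression in the Uniform vector x that Theano can evaluate, with g >= 0 meaning the constraint is satisfied. MCMC below takes the same const_str/const_fx arguments.

g_strings = ["x[0] + x[1] - 0.5"]   # feasible region: x0 + x1 >= 0.5
X = VINormal(dim=2, const_str=g_strings, const_fx=[None], K=1000)
print(X.shape)                       # (1000, 2)
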
Example #23
def MCMC(dim, const_str, const_fx, K, chains=3, cores=3):
    """\
    MCMC sampling, with potentials lowering the probability
    of drawing failing points

    :input dim:       The dimensionality
    :input const_str: Constraint strings; used to define potentials
    :input const_fx:  Constraint callables, included for API compatibility
    :input K:         Number of points to sample
    :input chains:    Number of independent MCMC chains to run
    :input cores:     Number of CPU cores to run for parallelization

    :returns: A set of points X drawn from the potentials -c*g_i, for c = 1, 10, 20.
              This involves three successive sampling runs, which together should total K draws.

    """
    lambda_values = [1, 10, 20]
    k = int(K/(chains*len(lambda_values)))
    Xvals = list()
    for lam in lambda_values:
        with pm.Model() as mod:
            x = pm.Uniform('x', shape=dim)
            for i, const in enumerate(const_str):
                cname = 'g%d' % i
                g = pm.Deterministic(cname, eval(const, {'__builtins__': None}, {'x': x } ))
                pname = '%s_pot' % cname
                pm.Potential(pname, tt.switch(tt.lt(g, 0), lam*g, 0))
            trace = pm.sample(k, tune=1000, chains=chains, cores=cores)
        Xvals.append(trace['x'])
    return np.vstack(Xvals)
Example #24
    def test_sample_n_core(self, n_jobs):
        n_chains = 300
        n_steps = 100
        tune_interval = 25

        n = 4

        mu1 = np.ones(n) * (1. / 2)
        mu2 = -mu1

        stdev = 0.1
        sigma = np.power(stdev, 2) * np.eye(n)
        isigma = np.linalg.inv(sigma)
        dsigma = np.linalg.det(sigma)

        w1 = stdev
        w2 = (1 - stdev)

        def last_sample(x):
            return x[(n_steps - 1)::n_steps]

        def two_gaussians(x):
            log_like1 = - 0.5 * n * tt.log(2 * np.pi) \
                        - 0.5 * tt.log(dsigma) \
                        - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)
            log_like2 = - 0.5 * n * tt.log(2 * np.pi) \
                        - 0.5 * tt.log(dsigma) \
                        - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
            return tt.log(w1 * tt.exp(log_like1) + w2 * tt.exp(log_like2))

        with pm.Model() as ATMIP_test:
            X = pm.Uniform('X',
                           shape=n,
                           lower=-2. * np.ones_like(mu1),
                           upper=2. * np.ones_like(mu1),
                           testval=-1. * np.ones_like(mu1),
                           transform=None)
            like = pm.Deterministic('like', two_gaussians(X))
            llk = pm.Potential('like_potential', like)

        with ATMIP_test:
            step = smc.SMC(n_chains=n_chains,
                           tune_interval=tune_interval,
                           likelihood_name=ATMIP_test.deterministics[0].name)

        mtrace = smc.ATMIP_sample(n_steps=n_steps,
                                  step=step,
                                  n_jobs=n_jobs,
                                  progressbar=True,
                                  stage='0',
                                  homepath=self.test_folder,
                                  model=ATMIP_test,
                                  rm_flag=True)

        d = mtrace.get_values('X', combine=True, squeeze=True)
        x = last_sample(d)
        mu1d = np.abs(x).mean(axis=0)

        np.testing.assert_allclose(mu1, mu1d, rtol=0., atol=0.03)
Example #25
    def test_potential(self):
        with pm.Model():
            x = pm.Normal("x", 0.0, 1.0)
            pm.Potential("z", pm.Normal.dist(x, 1.0).logp(np.random.randn(10)))
            trace = pm.sample(100, chains=2)
            inference_data = from_pymc3(trace=trace)

        assert inference_data
Example #26
    def test_potentials_warning(self):
        warning_msg = "The effect of Potentials on other parameters is ignored during"
        with pm.Model() as m:
            a = pm.Normal("a", 0, 1)
            p = pm.Potential("p", a + 1)

        with m:
            with pytest.warns(UserWarning, match=warning_msg):
                pm.sample_prior_predictive(samples=5)
Example #27
    def __init__(self, name='', model=None):
        super().__init__(name, model)
        assert pm.modelcontext(None) is self
        # 1) init variables with the Var method
        self.Var('v1', pm.Normal.dist())
        self.v2 = pm.Normal('v2', mu=0, sigma=1)
        # 2) Potentials and Deterministic variables can be added the same way;
        #    make sure their names do not overlap with those of other models
        pm.Deterministic('d', tt.constant(1))
        pm.Potential('p', tt.constant(1))
Example #28
 def __init__(self, name="", model=None):
     super().__init__(name, model)
     assert pm.modelcontext(None) is self
     # 1) init variables with Var method
     self.register_rv(pm.Normal.dist(), "v1")
     self.v2 = pm.Normal("v2", mu=0, sigma=1)
     # 2) Potentials and Deterministic variables with method too
     # be sure that names will not overlap with other same models
     pm.Deterministic("d", at.constant(1))
     pm.Potential("p", at.constant(1))
Example #29
    def test_potentials_warning(self):
        warning_msg = "The effect of Potentials on other parameters is ignored during"
        with pm.Model() as m:
            a = pm.Normal("a", 0, 1)
            p = pm.Potential("p", a + 1)
            obs = pm.Normal("obs", a, 1, observed=5)

        trace = az.from_dict({"a": np.random.rand(10)})
        with pytest.warns(UserWarning, match=warning_msg):
            pm.sample_posterior_predictive_w(samples=5, traces=[trace, trace], models=[m, m])
Example #30
    def sample_posterior(self, t, T, n_samp, n_burnin=None):
        """
        Get samples from the posterior, e.g. for posterior inference or computing Bayesian credible intervals.
        This routine samples via the random walk Metropolis (RWM) algorithm using the ``pymc3`` library.

        The function returns a ``pymc3.MultiTrace`` object that can be indexed much like a ``numpy.array``.
        Furthermore, ``pymc3`` can be used to create "traceplots", for example via

        .. code-block:: python

            from matplotlib import pyplot as plt
            import pymc3

            trace = uvb.fit(t, T)
            pymc3.traceplot(trace["mu"])

            plt.plot(trace["mu"], trace["alpha"])

        :param numpy.array[float] t: Observation timestamps of the process up to time T. 1-d array of timestamps,
            must be sorted in ascending order
        :param T: (optional) maximum time
        :type T: float or None
        :param int n_samp: number of posterior samples to take
        :param int n_burnin: number of samples to discard (as the burn-in samples)

        :rtype: pymc3.MultiTrace
        :return: the posterior samples for mu, alpha and theta as a trace object
        """

        t, T = self._prep_t_T(t, T)

        if n_burnin is None:
            n_burnin = int(n_samp / 5)

        with pm.Model() as model:
            mu = pm.Gamma("mu", alpha=self.mu_hyp[0], beta=1. / self.mu_hyp[1])
            theta = pm.Gamma("theta",
                             alpha=self.theta_hyp[0],
                             beta=1. / self.theta_hyp[1])
            alpha = pm.Beta("alpha",
                            alpha=self.alpha_hyp[0],
                            beta=self.alpha_hyp[1])

            op = HPLLOp(t, T)
            a = pm.Deterministic('a', op(mu, alpha, theta))
            llop = pm.Potential('ll', a)

            trace = pm.sample(n_samp,
                              step=pm.Metropolis(),
                              cores=1,
                              chains=1,
                              tune=n_burnin,
                              discard_tuned_samples=True)

        return trace[n_burnin:]