Example #1
    def test_1d_w(self):
        nd = self.nd
        npop = self.npop
        mus = self.mus
        size = 100
        with pm.Model() as model:
            m = pm.NormalMixture("m",
                                 w=np.ones(npop) / npop,
                                 mu=mus,
                                 sigma=1e-5,
                                 comp_shape=(nd, npop),
                                 shape=nd)
            z = pm.Categorical("z", p=np.ones(npop) / npop)
            latent_m = pm.Normal("latent_m",
                                 mu=mus[..., z],
                                 sigma=1e-5,
                                 shape=nd)

        m_val = m.random(size=size)
        latent_m_val = latent_m.random(size=size)
        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 comes from the same mixture
        # component
        assert all(np.all(np.diff(m_val) < 1e-3, axis=-1))
        assert all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))

        self.samples_from_same_distribution(m_val, latent_m_val)
        self.logp_matches(m, latent_m, z, npop, model=model)
Example #2
def mixture_model(random_seed=1234):
    """Sample mixture model to use in benchmarks"""
    np.random.seed(random_seed)
    size = 1000
    w_true = np.array([0.35, 0.4, 0.25])
    mu_true = np.array([0.0, 2.0, 5.0])
    sigma = np.array([0.5, 0.5, 1.0])
    component = np.random.choice(mu_true.size, size=size, p=w_true)
    x = np.random.normal(mu_true[component], sigma[component], size=size)

    with pm.Model() as model:
        w = pm.Dirichlet("w", a=np.ones_like(w_true))
        mu = pm.Normal("mu", mu=0.0, sd=10.0, shape=w_true.shape)
        enforce_order = pm.Potential(
            "enforce_order",
            aet.switch(mu[0] - mu[1] <= 0, 0.0, -np.inf) +
            aet.switch(mu[1] - mu[2] <= 0, 0.0, -np.inf),
        )
        tau = pm.Gamma("tau", alpha=1.0, beta=1.0, shape=w_true.shape)
        pm.NormalMixture("x_obs", w=w, mu=mu, tau=tau, observed=x)

    # Initialization can be poorly specified; this is a hack to make it work
    start = {
        "mu": mu_true.copy(),
        "tau_log__": np.log(1.0 / sigma**2),
        "w_stickbreaking__": np.array([-0.03, 0.44]),
    }
    return model, start
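A minimal usage sketch (not part of the original benchmark code): the start dict returned above is meant to be handed to pm.sample so the chain begins at the hand-tuned point described in the comment.

# Hypothetical usage of mixture_model(); argument values are illustrative.
model, start = mixture_model(random_seed=1234)
with model:
    trace = pm.sample(1000, tune=1000, start=start, random_seed=1234)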
Example #3
    def test_2d_w(self):
        nd = self.nd
        npop = self.npop
        mus = self.mus
        size = 100
        with pm.Model() as model:
            m = pm.NormalMixture(
                "m",
                w=np.ones((nd, npop)) / npop,
                mu=mus,
                sigma=1e-5,
                comp_shape=(nd, npop),
                shape=nd,
            )
            z = pm.Categorical("z", p=np.ones(npop) / npop, shape=nd)
            mu = tt.as_tensor_variable([mus[i, z[i]] for i in range(nd)])
            latent_m = pm.Normal("latent_m", mu=mu, sigma=1e-5, shape=nd)

        m_val = m.random(size=size)
        latent_m_val = latent_m.random(size=size)
        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 can come from independent
        # components
        assert not all(np.all(np.diff(m_val) < 1e-3, axis=-1))
        assert not all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))

        self.samples_from_same_distribution(m_val, latent_m_val)
        self.logp_matches(m, latent_m, z, npop, model=model)
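The two tests above differ only in the layout of w: with a 1-D weight vector every dimension shares a single component draw, while a 2-D (nd, npop) weight matrix lets each dimension mix over components independently. A minimal side-by-side sketch of the two layouts, reusing the same toy shapes as the tests:

import numpy as np
import pymc3 as pm

nd, npop = 3, 4
mus = np.arange(nd * npop, dtype="float64").reshape(nd, npop)

with pm.Model():
    # 1-D weights: one shared component index across all nd dimensions
    shared = pm.NormalMixture("shared", w=np.ones(npop) / npop, mu=mus,
                              sigma=1e-5, comp_shape=(nd, npop), shape=nd)
    # 2-D weights: each of the nd dimensions mixes over npop components on its own
    independent = pm.NormalMixture("independent", w=np.ones((nd, npop)) / npop,
                                   mu=mus, sigma=1e-5, comp_shape=(nd, npop), shape=nd)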
Example #4
    def _models(self):
        model = []

        # loop over the number of models appending to the list
        for i in range(self.D):
            with pm.Model() as mdl:
                alpha = pm.Normal('alpha', 0., 5., shape=self.K)

                # v = norm_cdf(alpha + beta * x_shared)
                # w = pm.Deterministic('w', stick_breaking(v))

                # use a separate loop variable so the outer model index `i` is not
                # shadowed (it selects the observed column below)
                beta = []
                for j in range(self.F):
                    beta.append(
                        pm.Normal('beta{}'.format(j), 0., 5., shape=self.K))

                if self.F == 1:
                    v_med = beta[0] * self.x_shared
                else:
                    v_med = beta[0] * self.x_shared[:, 0]

                for j in range(1, self.F):
                    v_med += beta[j] * self.x_shared[:, j]

                v = self.norm_cdf(alpha + v_med)
                w = pm.Deterministic('w', self.stick_breaking(v))

                # offset
                gamma = pm.Normal('gamma', 0., 10., shape=self.K)
                delta = []
                for j in range(self.F):
                    delta.append(
                        pm.Normal('delta{}'.format(j), 0., 5., shape=self.K))

                if self.F == 1:
                    d_med = delta[0] * self.x_shared
                else:
                    d_med = delta[0] * self.x_shared[:, 0]

                for j in range(1, self.F):
                    d_med += delta[j] * self.x_shared[:, j]

                mu = pm.Deterministic('mu', gamma + d_med)

                tau = pm.Gamma('tau', 1., 1., shape=self.K)
                obs = pm.NormalMixture('obs',
                                       w,
                                       mu,
                                       tau=tau,
                                       observed=self.yt[:, i])

            model.append(mdl)

        return model
Example #5
def density(x):
    """
    输入: 一个list, 是一系列一维样本
    输出: 基于DP的density估计, 范围-3到3
    """
    values = x
    values = np.array(values)
    values = (values - values.mean()) / values.std()

    N = len(values)
    K = 30
    SEED = int(time.time())
    x_plot = np.linspace(-3, 3, 200)

    def stick_breaking(beta):
        portion_remaining = tt.concatenate([[1],
                                            tt.extra_ops.cumprod(1 - beta)[:-1]
                                            ])
        return beta * portion_remaining

    with pm.Model() as model:
        alpha = pm.Gamma('alpha', 1., 1.)
        beta = pm.Beta('beta', 1., alpha, shape=K)
        w = pm.Deterministic('w', stick_breaking(beta))

        tau = pm.Gamma('tau', 1., 1., shape=K)
        lambda_ = pm.Uniform('lambda', 0, 5, shape=K)
        mu = pm.Normal('mu', 0, tau=lambda_ * tau, shape=K)
        obs = pm.NormalMixture('obs',
                               w,
                               mu,
                               tau=lambda_ * tau,
                               observed=values)

    with model:
        trace = pm.sample(1000, random_seed=SEED, init='advi')

    fig, ax = plt.subplots(figsize=(8, 6))
    plot_w = np.arange(K) + 1
    ax.bar(plot_w - 0.5, trace['w'].mean(axis=0), width=1., lw=0)
    ax.set_xlim(0.5, K)
    ax.set_xlabel('Component')
    ax.set_ylabel('Posterior expected mixture weight')

    post_pdf_contribs = sp.stats.norm.pdf(
        np.atleast_3d(x_plot), trace['mu'][:, np.newaxis, :],
        1. / np.sqrt(trace['lambda'] * trace['tau'])[:, np.newaxis, :])
    post_pdfs = (trace['w'][:, np.newaxis, :] * post_pdf_contribs).sum(axis=-1)
    post_pdf_low, post_pdf_high = np.percentile(post_pdfs, [2.5, 97.5], axis=0)
    return post_pdfs
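A sketch of how the returned post_pdfs might be used; the -3..3 grid mirrors the x_plot defined inside density(), and the sample data here is purely illustrative.

import numpy as np
import matplotlib.pyplot as plt

samples = np.random.normal(0.0, 1.0, size=500)   # illustrative 1-D data
post_pdfs = density(samples)
x_plot = np.linspace(-3, 3, 200)                 # same grid used inside density()

plt.plot(x_plot, post_pdfs.mean(axis=0), label='posterior expected density')
plt.fill_between(x_plot, *np.percentile(post_pdfs, [2.5, 97.5], axis=0), alpha=0.3)
plt.legend()
plt.show()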
Example #6
    def fit_infinite_mixture_model(coverage_dist, K, number_of_chains_to_use,
                                   number_of_iterations, burn_period):
        covdist_standard = (coverage_dist -
                            coverage_dist.mean()) / coverage_dist.std()
        N = len(covdist_standard)

        __gc.collect()

        with __pm.Model() as model:
            alpha = __pm.Gamma('alpha', 1., 1.)
            beta = __pm.Beta('beta', 1., alpha, shape=K)
            w = __pm.Deterministic('w', stick_breaking(beta))

            tau = __pm.Gamma('tau', 1., 1., shape=K)
            lambda_ = __pm.Uniform('lambda', 0, 5, shape=K)
            mu = __pm.ExGaussian('mu',
                                 mu=-4,
                                 sigma=__np.sqrt(1 / (lambda_ * tau)),
                                 nu=5,
                                 shape=K)
            obs = __pm.NormalMixture('obs',
                                     w,
                                     mu,
                                     tau=lambda_ * tau,
                                     observed=covdist_standard)

        # logger = __logging.getLogger("pymc3")
        # logger.propagate = False

        __logging.getLogger("pymc3").setLevel(__logging.WARNING)

        with model:
            step1 = __pm.Metropolis(vars=[alpha, beta, tau, lambda_, mu],
                                    verbose=0)

            tr = __pm.sample(draws=number_of_iterations - burn_period,
                             tune=burn_period,
                             step=[step1],
                             njobs=number_of_chains_to_use,
                             progressbar=False,
                             verbose=0,
                             compute_convergence_checks=False)

        # trace = tr[burn_period:]
        # return trace
        return tr
Example #7
def main():
    DATA_URI = 'http://www.stat.cmu.edu/~larry/all-of-nonpar/=data/lidar.dat'

    def standardize(x):
        return (x - x.mean()) / x.std()

    df = (pd.read_csv(DATA_URI, sep=' *', engine='python').assign(
        std_range=lambda df: standardize(df.range),
        std_logratio=lambda df: standardize(df.logratio)))

    N, _ = df.shape
    K = 20

    std_range = df.std_range.values[:, np.newaxis]
    std_logratio = df.std_logratio.values[:, np.newaxis]

    x_lidar = shared(std_range, broadcastable=(False, True))

    with pm.Model() as model:
        alpha = pm.Normal('alpha', 0., 5., shape=K)
        beta = pm.Normal('beta', 0., 5., shape=K)
        v = norm_cdf(alpha + beta * x_lidar)
        w = pm.Deterministic('w', stick_breaking(v))

    print('defined dirichlet priors')

    with model:
        gamma = pm.Normal('gamma', 0., 10., shape=K)
        delta = pm.Normal('delta', 0., 10., shape=K)
        mu = pm.Deterministic('mu', gamma + delta * x_lidar)

    print('defined lm')

    with model:
        tau = pm.Gamma('tau', 1., 1., shape=K)
        obs = pm.NormalMixture('obs', w, mu, tau=tau, observed=std_logratio)

    SAMPLES = 20000
    BURN = 10000

    with model:
        step = pm.Metropolis()
        trace = pm.sample(SAMPLES, step, chains=1, tune=BURN, random_seed=SEED)

    return
Example #8
def periodprior(t, y, detrend_order=2, min_period=0.5):
    detrended_y = y - trend(t, y, detrend_order)
    lag, power = autocorr(t, detrended_y, min_period=min_period)
    pks, w, sd = candidates(lag, power)
    #w[pks > max(lag)/2] = 0
    w = w / np.sum(w)
    mu = pks
    if len(pks) == 0:
        try:
            dist = pm.Uniform("P", lower=0.0, upper=max(lag) / 2)
        except TypeError:
            dist = pm.Uniform.dist(lower=0.0, upper=max(lag) / 2)
        return dist
    try:
        dist = pm.NormalMixture("P",
                                w=w,
                                mu=mu,
                                sd=sd,
                                testval=mu[np.argmax(w)])
    except (TypeError, ValueError) as e:
        print(e)
        dist = pm.NormalMixture.dist(w=w, mu=mu, sd=sd)
    return dist
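For context (an assumption based on PyMC3 behaviour, not stated in the snippet): creating a named variable such as pm.Uniform("P", ...) outside a model context raises TypeError, which is why periodprior falls back to the standalone .dist(...) form. A hypothetical call site, assuming t and y are arrays of observation times and values:

# Hypothetical usage of periodprior()
with pm.Model() as period_model:
    P = periodprior(t, y)         # registered in the model as the RV "P"

P_standalone = periodprior(t, y)  # outside a model: returns an unnamed .dist() object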
Example #9
def trend_model(y_old, y_new):
    # PyMC3 trend changepoint model
    # trend is modeled by Normal RVs
    # y_old: older data points since the last changepoint
    # y_new: last win(10) datapoints
    g_new = np.gradient(y_new)                     # observed trend
    g_old = np.gradient(y_old) if len(y_old) > 1 else g_new
    mu_new = g_new.mean() if len(g_new) > 0 else None
    mu_old = g_old.mean() if len(g_old) > 0 else mu_new
    sigma_new = max(1.0, g_new.std()) if len(g_new) > 0 else None
    sigma_old = max(1.0, g_old.std()) if len(g_old) > 0 else sigma_new
    y_ = np.concatenate((y_old, y_new))
    y_obs = theano.shared(y_)
    ts = np.array(range(1, 1 + len(y_)))  # start from 1 to deal with intercept
    t_arr = np.array([ts, ts]).T

    with pm.Model() as model:
        w = pm.Dirichlet('w', a=np.ones(2))
        mu = pm.Normal('mu', np.array([mu_old, mu_new]), np.array([sigma_old, sigma_new]), shape=(2,))
        mu_t = pm.Deterministic('mu_t', t_arr * mu)
        tau = pm.Gamma('tau', 1.0, 1.0, shape=2)
        diff = pm.Deterministic('diff', mu[1] - mu[0])                    # needed for PyMC3 model
        obs = pm.NormalMixture('obs', w, mu_t, tau=tau, observed=y_obs)   # needed for PyMC3 model
    return model
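A sketch of how the returned model might be sampled and the 'diff' deterministic inspected; the two series below are illustrative only.

# Hypothetical usage of trend_model(); y_old / y_new are illustrative series.
import numpy as np
import pymc3 as pm

y_old = np.linspace(0.0, 1.0, 30) + np.random.normal(0.0, 0.1, size=30)  # shallow trend
y_new = np.linspace(1.0, 3.0, 10) + np.random.normal(0.0, 0.1, size=10)  # steeper trend

model = trend_model(y_old, y_new)
with model:
    trace = pm.sample(1000, tune=1000, cores=1)

# posterior probability that the newer trend is steeper than the older one
print(np.mean(trace['diff'] > 0))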
Example #10
def stick_breaking(beta):
    portion_remaining = tt.concatenate([[1],
                                        tt.extra_ops.cumprod(1 - beta)[:-1]])

    return beta * portion_remaining


with pm.Model() as model:
    alpha = pm.Gamma('alpha', 1., 1.)
    beta = pm.Beta('beta', 1., alpha, shape=K)
    w = pm.Deterministic('w', stick_breaking(beta))

    tau = pm.Gamma('tau', 1., 1., shape=K)
    lambda_ = pm.Uniform('lambda', 0, 5, shape=K)
    mu = pm.Normal('mu', 0, tau=lambda_ * tau, shape=K)
    obs = pm.NormalMixture('obs',
                           w,
                           mu,
                           tau=lambda_ * tau,
                           observed=old_faithful_df.std_waiting.values)

with model:
    trace = pm.sample(1000, random_seed=SEED, init='advi')
""" GEM(alpha) first 5 scalars """
for i in range(5):
    pm.traceplot(trace['w'][:, i])
"""
Doubts:

1. We are using a truncated version of GEM(alpha). How would we extend to an
arbitrarily large K?
2. Technically, all we used here is GEM(alpha) and not the Dirichlet process.
Note that we used an independent Normal distribution to infer about the means
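On doubt 1, one practical (if approximate) answer is to keep the truncated GEM(alpha) but raise the truncation level K until the highest-index sticks receive negligible posterior weight. A sketch, reusing stick_breaking and the same old_faithful_df data as above:

K = 60  # a larger truncation of GEM(alpha)

with pm.Model() as model_k60:
    alpha = pm.Gamma('alpha', 1., 1.)
    beta = pm.Beta('beta', 1., alpha, shape=K)
    w = pm.Deterministic('w', stick_breaking(beta))

    tau = pm.Gamma('tau', 1., 1., shape=K)
    lambda_ = pm.Uniform('lambda', 0, 5, shape=K)
    mu = pm.Normal('mu', 0, tau=lambda_ * tau, shape=K)
    obs = pm.NormalMixture('obs', w, mu, tau=lambda_ * tau,
                           observed=old_faithful_df.std_waiting.values)

    trace_k60 = pm.sample(1000, init='advi')

# If these trailing weights are already ~0, the truncation is effectively large
# enough; otherwise increase K further.
print(trace_k60['w'].mean(axis=0)[-10:])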
Example #11
std_devs = [2, 2, 2]

mix = np.random.normal(np.repeat(means, n_cluster),
                       np.repeat(std_devs, n_cluster))
"""
sns.kdeplot(np.array(mix))
plt.xlabel('$x$', fontsize=14)
plt.savefig('img701.png')
"""

with pm.Model() as model_mg:
    p = pm.Dirichlet('p', a=np.ones(clusters))

    means = pm.Normal('means', mu=[10, 25, 30], sd=2, shape=clusters)
    sd = pm.HalfCauchy('sd', 5)
    y = pm.NormalMixture('y', w=p, mu=means, sd=sd, observed=mix)
    trace_mg = pm.sample(10000, njobs=1)

chain_mg = trace_mg[1000:]
varname_mg = ['means', 'sd', 'p']
pm.traceplot(chain_mg, varname_mg)
#pm.traceplot(chain_mg)
plt.savefig('img705_b.png')

#pm.summary(chain_mg, varname_mg)

plt.figure()
ppc = pm.sample_ppc(chain_mg, 100, model_mg)
#print(ppc)
#for i in ppc['y']:
#	sns.kdeplot(i, alpha=0.1, color='b')
Example #12
                        np.sort(data[:, channel])[range(
                            int(np.round(len(data[:, channel]) / 2)))]),
                     np.mean(
                         np.sort(data[:, channel])[range(
                             int(np.round(len(data[:, channel]) * 0.95)),
                             len(data[:, channel]))]))),
                sigma=np.array((0.33, 1)),
                shape=2)
            sigmas = pm.Gamma('sigma',
                              mu=np.array((0.25, 1)),
                              sigma=np.array((0.25, 1)),
                              shape=2)
            prior = sample_prior(samples=1000)
            x = pm.NormalMixture('x_obs',
                                 w,
                                 mus,
                                 sigma=sigmas,
                                 observed=data[:, channel])

        # Sample:
        #with model:
        #    %time hmc_trace = pm.sample(draws=300, tune=700, cores=10)

        # Fit:
        with model:
            advi_fit = pm.fit(n=3000,
                              obj_optimizer=pm.adagrad(learning_rate=1e-1),
                              method='advi')

        # Show results MCMC
        #pm.traceplot(hmc_trace)
Example #13
                             shape=(K, ))
    weight_beta = pm.Normal(name='weight_beta',
                            mu=normal_prior_mu,
                            sd=normal_prior_sd,
                            shape=(K, ))
    # Reshape into row vectors
    weight_alpha_row = tt.reshape(weight_alpha, (1, K))
    weight_beta_row = tt.reshape(weight_beta, (1, K))

    # The weighting factors are a softmax of weight_alpha + x*weight_beta; shape (N,K)
    weight = pm.Deterministic(
        'weight', softmax(weight_alpha_row + tt.dot(xt, weight_beta_row)))

    # Sample points using a pymc3 NormalMixture
    # See lecture notes 25, p. 44
    y_obs = pm.NormalMixture('y_obs', w=weight, mu=mu, sd=sigma, observed=y)

# *************************************************************************************************
# A2 Fit this model variationally for about 50,000 iterations using the adam optimizer.
# (obj_optimizer=pm.adam())
# Plot the ELBO to make sure you have converged.
# Print summaries and traceplots for the means, σ's and probabilities.

# Number of iterations for ADVI fit
num_iters: int = 50000

# Fit the model using ADVI
# Tried to fit using FullRankADVI as well; results were horrible
try:
    advi = vartbl['advi']
    print(f'Loaded ADVI fit for Gaussian Mixture Model.')
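A sketch of the ADVI fit described in the comments above, assuming the surrounding pm.Model context is named model and that matplotlib is available; pm.adam() is the optimizer named in the assignment comment.

import matplotlib.pyplot as plt

# Fit the mixture model with ADVI for num_iters iterations using the adam optimizer
with model:
    advi = pm.fit(n=num_iters, method='advi', obj_optimizer=pm.adam())

# Plot the ELBO history to check convergence
plt.plot(advi.hist)
plt.xlabel('iteration')
plt.ylabel('negative ELBO')

# Draw from the fitted approximation and summarize
trace_advi = advi.sample(5000)
print(pm.summary(trace_advi))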
Example #14
import pymc3 as pm
import numpy as np
data = 10 * np.ones(10)
SEED = 1

N = 1000

W = np.array([0.35, 0.4, 0.25])

MU = np.array([0., 2., 5.])
SIGMA = np.array([0.5, 0.5, 1.])

component = np.random.choice(MU.size, size=N, p=W)
x = np.random.normal(MU[component], SIGMA[component], size=N)

with pm.Model() as model:
    w = pm.Dirichlet('w', np.ones_like(W))

    mu = pm.Normal('mu', 0., 10., shape=W.size)
    tau = pm.Gamma('tau', 1., 1., shape=W.size)

    x_obs = pm.NormalMixture('x_obs', w, mu, tau=tau, observed=x)

with model:
    trace = pm.sample(5000, n_init=10000, tune=1000, random_seed=SEED)[1000:]
Example #15
    def __init__(self,
                 mu_data,
                 tau_data,
                 prior="Gaussian",
                 parameters={
                     "location": None,
                     "scale": None
                 },
                 hyper_alpha=[100, 10],
                 hyper_beta=[10],
                 hyper_gamma=None,
                 hyper_delta=None,
                 transformation="mas",
                 parametrization="non-central",
                 name='1D',
                 model=None):
        super().__init__(name, model)

        #------------------- Data ------------------------------------------------------
        self.N = len(mu_data)

        if self.N == 0:
            sys.exit(
                "Data has length zero! You must provide at least one data point."
            )

        #-------------------------------------------------------------------------------

        #============= Transformations ====================================

        if transformation is "mas":
            Transformation = Iden

        elif transformation is "pc":
            Transformation = pc2mas

        else:
            sys.exit("Transformation is not accepted")

        if parametrization == "non-central":
            print("Using non central parametrization.")
        else:
            print("Using central parametrization.")
        #==================================================================

        #================ Hyper-parameters =====================================
        if hyper_delta is None:
            shape = ()
        else:
            shape = len(hyper_delta)

        #------------------------ Location ----------------------------------
        if parameters["location"] is None:
            pm.Normal("loc",
                      mu=hyper_alpha[0],
                      sigma=hyper_alpha[1],
                      shape=shape)

        else:
            self.loc = parameters["location"]

        #------------------------ Scale ---------------------------------------
        if parameters["scale"] is None:
            pm.Gamma("scl", alpha=2.0, beta=2.0 / hyper_beta[0], shape=shape)
        else:
            self.scl = parameters["scale"]
        #========================================================================

        #================= True values ========================================================
        #--------- Cluster oriented prior-----------------------------------------------
        if prior is "Uniform":
            if parametrization == "central":
                pm.Uniform("source",
                           lower=self.loc - self.scl,
                           upper=self.loc + self.scl,
                           shape=self.N)
            else:
                pm.Uniform("offset", lower=-1., upper=1., shape=self.N)
                pm.Deterministic("source", self.loc + self.scl * self.offset)

        elif prior is "Gaussian":
            if parametrization == "central":
                pm.Normal("source", mu=self.loc, sd=self.scl, shape=self.N)
            else:
                pm.Normal("offset", mu=0.0, sd=1.0, shape=self.N)
                pm.Deterministic("source", self.loc + self.scl * self.offset)

        elif prior is "GMM":
            # break symmetry and avoids inf in advi
            pm.Potential('order_means',
                         tt.switch(self.loc[1] - self.loc[0] < 0, -1e20, 0))

            if parameters["weights"] is None:
                pm.Dirichlet("weights", a=hyper_delta, shape=shape)
            else:
                self.weights = parameters["weights"]

            if parametrization == "central":
                pm.NormalMixture("source",
                                 w=self.weights,
                                 mu=self.loc,
                                 sigma=self.scl,
                                 comp_shape=1,
                                 shape=self.N)
            else:
                pm.Normal("offset", mu=0.0, sd=1.0, shape=self.N)
                # latent cluster of each observation
                component = pm.Categorical("component",
                                           p=self.weights,
                                           shape=self.N)
                pm.Deterministic(
                    "source",
                    self.loc[component] + self.scl[component] * self.offset)

        elif prior is "EFF":
            if parameters["gamma"] is None:
                pm.Gamma("x", alpha=2.0, beta=2.0 / hyper_gamma[0])
                pm.Deterministic("gamma", 1.0 + self.x)
            else:
                self.gamma = parameters["gamma"]

            if parametrization == "central":
                EFF("source",
                    location=self.loc,
                    scale=self.scl,
                    gamma=self.gamma,
                    shape=self.N)
            else:
                EFF("offset",
                    location=0.0,
                    scale=1.0,
                    gamma=self.gamma,
                    shape=self.N)
                pm.Deterministic("source", self.loc + self.scl * self.offset)

        elif prior is "King":
            if parameters["rt"] is None:
                pm.Gamma("x", alpha=2.0, beta=2.0 / hyper_gamma[0])
                pm.Deterministic("rt", 1.0 + self.x)
            else:
                self.rt = parameters["rt"]

            if parametrization == "central":
                King("source",
                     location=self.loc,
                     scale=self.scl,
                     rt=self.rt,
                     shape=self.N)
            else:
                King("offset",
                     location=0.0,
                     scale=1.0,
                     rt=self.rt,
                     shape=self.N)
                pm.Deterministic("source", self.loc + self.scl * self.offset)

        #---------- Galactic oriented prior ---------------------------------------------
        elif prior is "EDSD":
            EDSD("source", scale=self.scl, shape=self.N)

        else:
            sys.exit("The specified prior is not implemented")
        #-----------------------------------------------------------------------------
        #=======================================================================================
        # print_ = tt.printing.Print("source")(self.source)
        #----------------- Transformations ----------------------
        true = Transformation(self.source)

        #----------------------- Likelihood ----------------------------------------
        pm.MvNormal('obs', mu=true, tau=tau_data, observed=mu_data)
Example #16
x_lidar = shared(std_range, broadcastable=(False, True))

with pm.Model() as model:
    alpha = pm.Normal('alpha', 0., 5., shape=K)
    beta = pm.Normal('beta', 0., 5., shape=K)
    v = norm_cdf(alpha + beta * x_lidar)
    w = pm.Deterministic('w', stick_breaking(v))

with model:
    gamma = pm.Normal('gamma', 0., 10., shape=K)
    delta = pm.Normal('delta', 0., 10., shape=K)
    mu = pm.Deterministic('mu', gamma + delta * x_lidar)

with model:
    tau = pm.Gamma('tau', 1., 1., shape=K)
    obs = pm.NormalMixture('obs', w, mu, tau=tau, observed=std_logratio)

SAMPLES = 5
BURN = 1

with model:
    step = pm.Metropolis()
    trace = pm.sample(SAMPLES, step, chains=1, tune=BURN, random_seed=SEED)

fig, ax = plt.subplots(figsize=(8, 6))

ax.bar(np.arange(K) + 1, trace['w'].mean(axis=0).max(axis=0))

ax.set_xlim(1 - 0.5, K + 0.5)
ax.set_xticks(np.arange(0, K, 2) + 1)
ax.set_xlabel('Mixture component')
Example #17
)

cs_exp = cs['exp']
az.plot_kde(cs_exp)
plt.hist(cs_exp, density=True, bins=30, alpha=.3)
plt.yticks([])

# %%
clusters = 2

with pm.Model() as model_mg:
    p = pm.Dirichlet('p', a=np.ones(clusters))
    means = pm.Normal('means', mu=cs_exp.mean(), sd=10, shape=clusters)
    sd = pm.HalfNormal('sd', sd=10)

    y = pm.NormalMixture('y', w=p, mu=means, sd=sd, observed=cs_exp)
    trace_mg = pm.sample(random_seed=123)

# %%
varnames = ['means', 'p']
az.plot_trace(trace_mg, varnames)

# %%
az.summary(trace_mg, varnames)

# %%
clusters = 2

with pm.Model() as model_mgp:
    p = pm.Dirichlet('p', a=np.ones(clusters))
    means = pm.Normal('means',
Example #18
    def MCMC_MC_sampler(data,
                        burn_in,
                        test,
                        tol,
                        num_cluster=None):  # Mixture Weibull sampler

        # NOTE: W (component weights) and x (data) are assumed to be defined at
        # module level; this PyMC3 model is built but never sampled here -- the
        # Gibbs sampler implemented below is what actually fits the mixture.
        with pm.Model() as model:

            w = pm.Dirichlet('w', np.ones_like(W))

            mu = pm.Gamma('mu', 0., 10., shape=W.size)
            tau = pm.Gamma('tau', 1., 1., shape=W.size)

            x_obs = pm.NormalMixture('x_obs', w, mu, tau=tau, observed=x)

        # Initialization
        num_data = len(data)
        if num_cluster is None:
            num_cluster = 2
        count = 0

        # Posterior Distribution sum(w_model*(theta*(s)**(alpha-1)*alpha*exp(-theta*(s)**alpha))
        theta_alpha = 0.01
        theta_beta = 0.001
        alpha_alpha = 1
        alpha_beta = 0.2
        w_fa = np.ones(num_cluster)
        origin_w_fa = w_fa[:]
        w_model = np.random.dirichlet(w_fa)
        theta = np.random.gamma(theta_alpha, theta_beta, num_cluster)
        alpha = np.random.gamma(alpha_alpha, alpha_beta, num_cluster)
        likelihood_record = []
        w_record = []
        theta_record = []
        alpha_record = []

        while count <= (burn_in + test):

            count += 1
            log_likelihood = 0
            cluster_data = [[] for i in range(num_cluster)]
            new_theta = [[] for i in range(num_cluster)]
            new_alpha = [[] for i in range(num_cluster)]

            # Update of P(z)
            #import pdb; pdb.set_trace()
            for i in range(num_data):

                pz_list = np.array([
                    w_model[k] *
                    (theta[k] * alpha[k] * data[i]**(alpha[k] - 1)) *
                    exp(-theta[k] * data[i]**(alpha[k]))
                    for k in range(num_cluster)
                ])

                mv_pz, ind_pz = max([(mv_pz, ind_pz)
                                     for ind_pz, mv_pz in enumerate(pz_list)])
                cluster_data[ind_pz].append(data[i])

                if pz_list[ind_pz] == 0:
                    log_likelihood += -1 / tol
                else:
                    log_likelihood += log(pz_list[ind_pz])

            # Update of w_model
            #import pdb; pdb.set_trace()
            for i in range(num_cluster):

                w_fa[i] = origin_w_fa[i] + len(cluster_data[i])

            w_model = np.random.dirichlet(w_fa)

            # Update of theta
            #import pdb; pdb.set_trace()
            for i in range(num_cluster):

                new_theta_alpha = len(cluster_data[i]) + theta_alpha
                new_theta_beta = theta_beta + sum([
                    cluster_data[i][k]**(alpha[i])
                    for k in range(len(cluster_data[i]))
                ])
                new_theta[i] = np.random.gamma(new_theta_alpha, new_theta_beta)

            theta = new_theta[:]

            # Update of alpha (sampled from Motroplis Hasting/Rejection support)
            #import pdb; pdb.set_trace()
            for i in range(num_cluster):

                p_alpha = lambda x: x**(len(cluster_data[
                    i]) + alpha_alpha - 1) * exp(x * sum([
                        log(cluster_data[i][k])
                        for k in range(len(cluster_data[i]))
                    ]) - theta[i] * sum([
                        cluster_data[i][k]**(x)
                        for k in range(len(cluster_data[i]))
                    ]) - x * alpha_beta)

                new_alpha[i] = MCMC.slice_sampler(p_alpha,
                                                  alpha[i],
                                                  left_bound=0)

            alpha = new_alpha[:]

            # Record the sampling after burn-in
            if count >= burn_in:

                w_record.append(w_model[:])
                theta_record.append(theta[:])
                alpha_record.append(alpha[:])

            likelihood_record.append(log_likelihood)

        return w_record, theta_record, alpha_record, likelihood_record
Example #19
def run_One_d_Model(data,
                    K=3,
                    mus=None,
                    mc_samples=10000,
                    jobs=1,
                    n_cols=10,
                    n_rows=100,
                    neigs=1):
    def logp_simple(mus, category, aux3):
        def logp_(value):
            spatial_factor = 2
            aux = tt.ones((n_samples, ))
            logps = tt.zeros((n_samples))
            sumlogps = tt.zeros((K, n_samples))
            pi = tt.sum(tt.eq(aux3, (aux * category).reshape((n_samples, 1))),
                        axis=1) / 8.0
            #TODO: are logps and sumlogps always overwritten for all values?
            for i, label in enumerate(range(K)):
                pi_l = tt.sum(tt.eq(aux3, (aux * label).reshape(
                    (n_samples, 1))),
                              axis=1) / 8.0
                sumlogps = tt.set_subtensor(sumlogps[i, :],
                                            (mus[label].logp(value)) +
                                            (pi_l - 1) * spatial_factor)
            sumlogps = tt.sum(sumlogps, axis=0)

            for label in range(K):
                indx = tt.eq(category, tt.as_tensor_variable(label)).nonzero()
                logps = tt.set_subtensor(
                    logps[indx], (mus[label].logp(value)[indx]) +
                    (pi[indx] - 1) * spatial_factor - sumlogps[indx])
            return logps

        return logp_

    n_samples, n_feats = data.shape
    n_samples = n_cols * n_rows
    max_neigs = 4 * neigs * (neigs + 1)
    #print max_neigs
    to_fill = indxs_neigs(range(n_samples),
                          n_cols=n_cols,
                          n_rows=n_rows,
                          n=neigs)
    inds = np.where(to_fill != -1)[0]
    to_fill = to_fill[to_fill != -1]
    aux = tt.ones(n_samples * max_neigs) * -69
    shp = (K, n_feats)
    mus_start = np.percentile(data, np.linspace(1, 100, K), axis=0)
    alpha = 0.1 * np.ones((n_samples, K))

    with pm.Model() as model:

        mu = pm.Normal('mus',
                       100,
                       mus_start,
                       shape=K,
                       testval=mus_start,
                       transform=Ordered())
        sd = pm.Uniform('sds', lower=0., upper=150., shape=K)

        #pi = Dirichlet('pi', a = alpha, shape= (n_samples, K) )
        pi = Dirichlet('pi', a=alpha, shape=K)

        category = pm.Categorical('category', p=pi, shape=n_samples)
        shit_max = pm.Deterministic('shit_max', tt.max(category))
        shit_min = pm.Deterministic('shit_min', tt.min(category))
        x = pm.NormalMixture()
    # Run GaussianMixture model for each section and channel separately:

    data = kptn_data_log
    n_samples = np.shape(data)[0]
    dimensions = np.shape(data)[1]
    alpha = np.array(((2,1,1),(10,1,1), (10,1,1), (10,1,1), (1,1,1)))
    
    channel = 4
    section = sections[i]
    
    with pm.Model() as model:
        w = pm.Dirichlet('w', alpha[channel])
        # mean of the 1000 lowest values in this section/channel anchors the components
        low_mean = np.mean(np.sort(data[sectionNumber == section, channel])[:1000])
        mus = pm.Normal('mu',
                        mu=np.array((low_mean + 0.5, low_mean + 2, low_mean + 2)),
                        sigma=np.array((0.1, 1, 1)),
                        shape=3)
        sigmas = pm.Gamma('sigma',
                          mu=np.array((0.1, 1, 1)),
                          sigma=np.array((0.1, 1, 1)),
                          shape=3)
        prior = sample_prior(samples = 1000)
        x = pm.NormalMixture('x_obs', w, mus, sigma = sigmas, observed=data[sectionNumber == section,channel])

        # Plot prior for some parameters:
        plt.hist(prior['mu'])
        plt.show()

        plt.hist(prior['sigma'])
        plt.show() 

        plt.hist(prior['w'])
        plt.show()

        # Fit:
        with model:
            advi_fit = pm.fit(n=2000, obj_optimizer=pm.adagrad(learning_rate=1e-1), method = 'advi')  
import numpy as np

# create data
sample = np.hstack([np.random.randn(100), np.random.rand(100)])

# Model
with pm.Model() as m:

    # Define parameters as random variables with prior
    mu = pm.Normal('mu')
    sd = pm.HalfNormal('sd', 1)
    condition = pm.Normal.dist(mu, sd)
    rn = pm.Uniform.dist(-5., 5.)

    # likelihood as mixture distribution
    sampling_dist = pm.Mixture("Sampling distribution ",
                               w=[0.5, 0.5],
                               comp_dists=[condition, rn],
                               observed=sample)

# sample using NUTS
trace = pm.sample(1000)
az.plot_trace(trace)

# Sample from a normal mixture
with pm.Model() as model:
    mus = pm.Normal('mus', shape=(6, 12))
    taus = pm.Gamma('taus', alpha=1, beta=1, shape=(6, 12))
    ws = pm.Dirichlet('ws', np.ones(12))
    mixture = pm.NormalMixture('m', w=ws, mu=mus, tau=taus, shape=6)
n_data = 1000
weight = np.array([0.2, 0.3, 0.5])
mu = np.array([-1.5, 0.75, 3.])
sigma = np.array([.75, .5, 1.])
comp = np.random.choice(mu.size, size=n_data, p=weight)
x_data = np.random.normal(mu[comp], sigma[comp], size=n_data)
plt.figure()
plt.hist(x_data, bins=200, label=r'Actual Data')

# inference
if __name__ == '__main__':
    with pm.Model() as model:
        w = pm.Dirichlet('w', np.ones_like(weight))
        mu = pm.Normal('mu', 0., 10., shape=weight.size)
        tau = pm.Gamma('tau', 1., 1., shape=weight.size)
        x_observed = pm.NormalMixture('x_observed', w, mu, tau=tau, \
                                      observed=x_data)
        trace = pm.sample(5000,
                          n_init=10000,
                          tune=1000,
                          random_seed=42,
                          cores=2)
    plt.figure()
    plt.hist(trace['w'], bins=50, label=r"posterior of $w$")
    plt.figure()
    plt.hist(trace['mu'], bins=50, label=r"posterior of $\mu$")
    plt.figure()
    plt.hist(trace['tau'], bins=50, label=r"posterior of $\tau$")
    with model:
        p_trace = pm.sample_posterior_predictive(trace, 5000, random_seed=42)
    plt.figure()
    plt.hist(p_trace['x_observed'], bins=50, density=True, histtype='step',\
Example #23
#  5 parameters: (mu1, mu2, sigma1, sigma2, ratio)
with pm.Model() as model2:
    M2_mu1 = pm.Uniform('M2_mu1', -5, 5)
    M2_mu2 = pm.Uniform('M2_mu2', -5, 5)

    M2_log_sigma1 = pm.Uniform('M2_log_sigma1', -10, 10)
    M2_log_sigma2 = pm.Uniform('M2_log_sigma2', -10, 10)

    ratio = pm.Uniform('ratio', 1E-3, 1E3)

    w1 = ratio / (1 + ratio)
    w2 = 1 - w1

    y = pm.NormalMixture('doublegauss',
                         w=tt.stack([w1, w2]),
                         mu=tt.stack([M2_mu1, M2_mu2]),
                         sd=tt.stack([np.exp(M2_log_sigma1),
                                      np.exp(M2_log_sigma2)]),
                         observed=x_sample)

    trace2 = pm.sample(draws=2500, tune=100)


# ------------------------------------------------------------
# Compute Odds ratio with density estimation technique

BF1, dBF1 = estimate_bayes_factor(trace1, r=0.05)
BF2, dBF2 = estimate_bayes_factor(trace2, r=0.05)

# ------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
import pymc3 as pm
import seaborn as sns
#import theano
import numpy as np

"""
    Source:
        https://docs.pymc.io/notebooks/variational_api_quickstart.html
"""
     
w = pm.floatX([.2, .8])
mu = pm.floatX([-.3, .5])
sd = pm.floatX([.1, .1])

with pm.Model() as model:
    x = pm.NormalMixture('x', w=w, mu=mu, sd=sd)#, dtype=theano.config.floatX)
    x2 = x ** 2
    sin_x = pm.math.sin(x)
    
    pm.Deterministic('x2', x2)
    pm.Deterministic('sin_x', sin_x)
    
    # Run No-U-Turn sampler
    trace = pm.sample(50000)
    
# Inspect results
pm.traceplot(trace)

# Same model, with ADVI this time
with pm.Model() as model:
    x = pm.NormalMixture('x', w=w, mu=mu, sd=sd) #, dtype=theano.config.floatX)