Example #1
def test_datalogpt_multiple_shapes():
    with pm.Model() as m:
        x = pm.Normal("x", 0, 1)
        z1 = pm.Potential("z1", x)
        z2 = pm.Potential("z2", at.full((1, 3), x))
        y1 = pm.Normal("y1", x, 1, observed=np.array([1]))
        y2 = pm.Normal("y2", x, 1, observed=np.array([1, 2]))
        y3 = pm.Normal("y3", x, 1, observed=np.array([1, 2, 3]))

    # This would raise a TypeError, see #4803 and #4804
    x_val = m.rvs_to_values[x]
    m.datalogpt.eval({x_val: 0})
Example #2
def test_point_logps_potential():
    with pm.Model() as model:
        x = pm.Flat("x", initval=1)
        y = pm.Potential("y", x * 2)

    logps = model.point_logps()
    assert np.isclose(logps["y"], 2)
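For context, point_logps evaluates each term at the model's initial point; x has initval=1, so the Potential x * 2 contributes 2. A minimal check, assuming the PyMC v4 API used in these examples:

print(model.initial_point())  # expected: {'x': array(1.)}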
Example #3
    def setup_class(self):
        super().setup_class()
        self.samples = 1000
        n = 4
        mu1 = np.ones(n) * (1.0 / 2)
        mu2 = -mu1

        stdev = 0.1
        sigma = np.power(stdev, 2) * np.eye(n)
        isigma = np.linalg.inv(sigma)
        dsigma = np.linalg.det(sigma)

        w1 = stdev
        w2 = 1 - stdev

        def two_gaussians(x):
            log_like1 = (-0.5 * n * at.log(2 * np.pi) - 0.5 * at.log(dsigma) -
                         0.5 * (x - mu1).T.dot(isigma).dot(x - mu1))
            log_like2 = (-0.5 * n * at.log(2 * np.pi) - 0.5 * at.log(dsigma) -
                         0.5 * (x - mu2).T.dot(isigma).dot(x - mu2))
            return at.log(w1 * at.exp(log_like1) + w2 * at.exp(log_like2))

        with pm.Model() as self.SMC_test:
            X = pm.Uniform("X", lower=-2, upper=2.0, shape=n)
            llk = pm.Potential("muh", two_gaussians(X))

        self.muref = mu1

        with pm.Model() as self.fast_model:
            x = pm.Normal("x", 0, 1)
            y = pm.Normal("y", x, 1, observed=0)
Example #4
def mixture_model(random_seed=1234):
    """Sample mixture model to use in benchmarks"""
    np.random.seed(1234)
    size = 1000
    w_true = np.array([0.35, 0.4, 0.25])
    mu_true = np.array([0.0, 2.0, 5.0])
    sigma = np.array([0.5, 0.5, 1.0])
    component = np.random.choice(mu_true.size, size=size, p=w_true)
    x = np.random.normal(mu_true[component], sigma[component], size=size)

    with pm.Model() as model:
        w = pm.Dirichlet("w", a=np.ones_like(w_true))
        mu = pm.Normal("mu", mu=0.0, sd=10.0, shape=w_true.shape)
        enforce_order = pm.Potential(
            "enforce_order",
            at.switch(mu[0] - mu[1] <= 0, 0.0, -np.inf) +
            at.switch(mu[1] - mu[2] <= 0, 0.0, -np.inf),
        )
        tau = pm.Gamma("tau", alpha=1.0, beta=1.0, shape=w_true.shape)
        pm.NormalMixture("x_obs", w=w, mu=mu, tau=tau, observed=x)

    # Initialization can be poorly specified, this is a hack to make it work
    start = {
        "mu": mu_true.copy(),
        "tau_log__": np.log(1.0 / sigma**2),
        "w_stickbreaking__": np.array([-0.03, 0.44]),
    }
    return model, start
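A minimal sketch (not part of the benchmark) of how such an ordering Potential behaves, assuming the PyMC v4 / Aesara API used above: the joint logp is finite for sorted means and -inf otherwise.

import numpy as np
import pymc as pm
import aesara.tensor as at

with pm.Model() as toy:
    mu = pm.Normal("mu", 0.0, 10.0, shape=3)
    # 0 when the ordering holds, -inf when it is violated
    pm.Potential(
        "order",
        at.switch(mu[0] - mu[1] <= 0, 0.0, -np.inf)
        + at.switch(mu[1] - mu[2] <= 0, 0.0, -np.inf),
    )

logp = toy.compile_logp()
print(logp({"mu": np.array([0.0, 1.0, 2.0])}))  # finite: ordered means
print(logp({"mu": np.array([2.0, 1.0, 0.0])}))  # -inf: ordering violated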
Example #5
    def test_potential(self):
        with pm.Model():
            x = pm.Normal("x", 0.0, 1.0)
            pm.Potential("z", pm.logp(pm.Normal.dist(x, 1.0), np.random.randn(10)))
            inference_data = pm.sample(100, chains=2, return_inferencedata=True)

        assert inference_data
Example #6
    def set_constraint(self):
        """
        Creates the constraint object needed for the nested sampling
          loops.
        """
        import pymc, numpy

        def do_likelihood(pars=self._priors):
            p = {}
            for key in self.names:
                p[key] = pars[key]
            if self.logLikelihood(p) > self.logLstar:
                return 0
            else:
                return -numpy.inf

        parents = {'pars': self._priors}
        constraint = pymc.Potential(logp=do_likelihood,
                                    name='constraint',
                                    parents=parents,
                                    doc='Likelihood constraint',
                                    verbose=0,
                                    cache_depth=2)

        return constraint
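A toy illustration (not from the class above) of the hard constraint this Potential encodes for nested sampling: points whose likelihood exceeds the current threshold logLstar keep a log-probability of 0, all others are rejected with -inf.

import numpy as np

def constraint_logp(logL, logLstar):
    # accept only points inside the current likelihood contour
    return 0.0 if logL > logLstar else -np.inf

print(constraint_logp(-3.2, -5.0))  # 0.0: accepted
print(constraint_logp(-7.1, -5.0))  # -inf: rejected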
Example #7
    def test_potentials_warning(self):
        warning_msg = "The effect of Potentials on other parameters is ignored during"
        with pm.Model() as m:
            a = pm.Normal("a", 0, 1)
            p = pm.Potential("p", a + 1)

        with m:
            with pytest.warns(UserWarning, match=warning_msg):
                pm.sample_prior_predictive(samples=5)
Example #8
def test_get_jaxified_logp():
    with pm.Model() as m:
        x = pm.Flat("x")
        y = pm.Flat("y")
        pm.Potential("pot", at.log(at.exp(x) + at.exp(y)))

    jax_fn = get_jaxified_logp(m)
    # This would underflow if not optimized
    assert not np.isinf(jax_fn((np.array(5000.0), np.array(5000.0))))
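A NumPy illustration (not part of the test) of the numerical problem the optimized graph avoids: the naive expression overflows at x = y = 5000, while the log-sum-exp form stays finite.

import numpy as np

x = y = np.array(5000.0)
with np.errstate(over="ignore"):
    print(np.log(np.exp(x) + np.exp(y)))  # inf: exp(5000) overflows
print(np.logaddexp(x, y))                 # ~5000.69: the stable rewrite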
Example #9
def test_rvs_to_value_vars_nested():
    # Test that calling rvs_to_value_vars in models with nested transformations
    # does not change the original rvs in place. See issue #5172
    with pm.Model() as m:
        one = pm.LogNormal("one", mu=0)
        two = pm.LogNormal("two", mu=at.log(one))

        # We add potentials or deterministics that are not in topological order
        pm.Potential("two_pot", two)
        pm.Potential("one_pot", one)

        before = aesara.clone_replace(m.free_RVs)

        # This call would change the model free_RVs in place in #5172
        res, _ = rvs_to_value_vars(m.potentials, apply_transforms=True)

        after = aesara.clone_replace(m.free_RVs)

        assert equal_computations(before, after)
Example #10
 def __init__(self, name="", model=None):
     super().__init__(name, model)
     assert pm.modelcontext(None) is self
     # 1) init variables with Var method
     self.register_rv(pm.Normal.dist(), "v1")
     self.v2 = pm.Normal("v2", mu=0, sigma=1)
     # 2) Potentials and Deterministic variables can be added with methods too;
     #    make sure that their names will not overlap with those of other models
     pm.Deterministic("d", at.constant(1))
     pm.Potential("p", at.constant(1))
Example #11
    def test_potentials_warning(self):
        warning_msg = "The effect of Potentials on other parameters is ignored during"
        with pm.Model() as m:
            a = pm.Normal("a", 0, 1)
            p = pm.Potential("p", a + 1)
            obs = pm.Normal("obs", a, 1, observed=5)

        trace = az_from_dict({"a": np.random.rand(10)})
        with pytest.warns(UserWarning, match=warning_msg):
            pm.sample_posterior_predictive_w(samples=5, traces=[trace, trace], models=[m, m])
Example #12
def new_player(name, sink=0.5, foul=1e-10): 
    player = {}
    player['sink'] = pm.Beta(name + "_sink", alpha=3, beta=3, value=sink)
    player['foul_end'] = pm.Beta(name + "_foul_end", alpha=3,
                                 beta=3, value=foul)
    vars = player.values()
    player['balance'] = pm.Potential(logp = sum_less_than_one,
                                     name = name + "_balance",
                                     parents = {'vars': vars},
                                     doc = name + "_balance")
        
    return player
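The helper sum_less_than_one is not shown in this snippet; a purely hypothetical implementation for the PyMC2 Potential API might look like the sketch below, rejecting parameter sets whose values do not sum below one.

import numpy as np

def sum_less_than_one(vars):
    # hypothetical: vars resolves to the current values of the Beta stochastics
    return 0.0 if sum(vars) < 1.0 else -np.inf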
Example #13
    def setup_class(self):
        super().setup_class()
        self.data = np.random.normal(loc=0, scale=1, size=1000)

        with pm.Model() as self.SMABC_test:
            a = pm.Normal("a", mu=0, sigma=1)
            b = pm.HalfNormal("b", sigma=1)
            s = pm.Simulator("s", self.normal_sim, a, b, sum_stat="sort", observed=self.data)
            self.s = s

        with pm.Model() as self.SMABC_potential:
            a = pm.Normal("a", mu=0, sigma=1, initval=0.5)
            b = pm.HalfNormal("b", sigma=1)
            c = pm.Potential("c", pm.math.switch(a > 0, 0, -np.inf))
            s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)
Example #14
 def sample(self, N, T=50, burn=1000):
     x = mc.Uniform('x', -np.ones(self.D), np.ones(self.D), value=np.zeros(self.D))
     def sphere(x):
         if (x**2).sum()>=1.:
             return -np.inf
         else:
             return self.f(x)
     p1 = mc.Potential(
         logp = sphere,
         name = 'sphere',
         parents = {'x': x},
         doc = 'Sphere potential',
         verbose = 0)
     chain = mc.MCMC([x])
     chain.use_step_method(mc.AdaptiveMetropolis, x, delay=burn)
     chain.sample(N*T+burn, thin=T, burn=burn)
     return x.trace()
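A small NumPy check (not from the method above) of the sphere constraint: proposals outside the unit ball get logp -inf, everything inside is scored by self.f.

import numpy as np

def sphere_logp(x, f=lambda x: 0.0):
    return -np.inf if (x ** 2).sum() >= 1.0 else f(x)

print(sphere_logp(np.array([0.3, 0.4])))  # 0.0: inside the unit ball
print(sphere_logp(np.array([0.8, 0.8])))  # -inf: outside, rejected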
Example #15
    def test_discrete_rounding_proposal(self):
        """
        Test that discrete variable values are automatically rounded
        in SMC logp functions
        """

        with pm.Model() as m:
            z = pm.Bernoulli("z", p=0.7)
            like = pm.Potential("like", z * 1.0)

        smc = IMH(model=m)
        smc.initialize_population()
        smc._initialize_kernel()

        assert smc.prior_logp_func(floatX(np.array([-0.51]))) == -np.inf
        assert np.isclose(smc.prior_logp_func(floatX(np.array([-0.49]))), np.log(0.3))
        assert np.isclose(smc.prior_logp_func(floatX(np.array([0.49]))), np.log(0.3))
        assert np.isclose(smc.prior_logp_func(floatX(np.array([0.51]))), np.log(0.7))
        assert smc.prior_logp_func(floatX(np.array([1.51]))) == -np.inf
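A quick NumPy check (not part of the test) of the expected values: the proposal is rounded to the nearest integer, and a Bernoulli with p=0.7 assigns log(0.3) to 0 and log(0.7) to 1.

import numpy as np

p = 0.7
z = np.round([0.49, 0.51])                 # [0., 1.]
print(np.log(np.where(z == 1, p, 1 - p)))  # [log(0.3), log(0.7)]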
Example #16
    def IndepSetPotential(self, v, N_v):
        '''
            N_v is the set of neighbors of v

            see the description in
                https://pymc-devs.github.io/pymc/modelbuilding.html#the-potential-class
        '''
        def potential_logp(v, N_v):
            if v + max(N_v) > 1:
                return -Inf
            else:
                return self.beta * v

        return pm.Potential(logp=potential_logp,
                            name="N_%d" % v,
                            parents={
                                'v': self.x[v],
                                'N_v': [self.x[w] for w in N_v]
                            },
                            doc='vertex potential term')
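A plain-Python illustration (not from the class) of what this vertex potential encodes for the independent-set model: a selected vertex with any selected neighbor is forbidden, otherwise the term contributes beta * v.

def potential_logp(v, N_v, beta=1.0):
    return float("-inf") if v + max(N_v) > 1 else beta * v

print(potential_logp(1, [0, 0, 0]))  # 1.0: selected, no selected neighbors
print(potential_logp(1, [1, 0, 0]))  # -inf: two adjacent vertices selected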
Example #17
def test_tempered_logp_dlogp():
    with pm.Model() as model:
        pm.Normal("x")
        pm.Normal("y", observed=1)
        pm.Potential("z", at.constant(-1.0, dtype=aesara.config.floatX))

    func = model.logp_dlogp_function()
    func.set_extra_values({})

    func_temp = model.logp_dlogp_function(tempered=True)
    func_temp.set_extra_values({})

    func_nograd = model.logp_dlogp_function(compute_grads=False)
    func_nograd.set_extra_values({})

    func_temp_nograd = model.logp_dlogp_function(tempered=True,
                                                 compute_grads=False)
    func_temp_nograd.set_extra_values({})

    x = np.ones(1, dtype=func.dtype)
    npt.assert_allclose(func(x)[0], func_temp(x)[0])
    npt.assert_allclose(func(x)[1], func_temp(x)[1])

    npt.assert_allclose(func_nograd(x), func(x)[0])
    npt.assert_allclose(func_temp_nograd(x), func(x)[0])

    func_temp.set_weights(np.array([0.0], dtype=func.dtype))
    func_temp_nograd.set_weights(np.array([0.0], dtype=func.dtype))
    npt.assert_allclose(func(x)[0], 2 * func_temp(x)[0] - 1)
    npt.assert_allclose(func(x)[1], func_temp(x)[1])

    npt.assert_allclose(func_nograd(x), func(x)[0])
    npt.assert_allclose(func_temp_nograd(x), func_temp(x)[0])

    func_temp.set_weights(np.array([0.5], dtype=func.dtype))
    func_temp_nograd.set_weights(np.array([0.5], dtype=func.dtype))
    npt.assert_allclose(func(x)[0], 4 / 3 * (func_temp(x)[0] - 1 / 4))
    npt.assert_allclose(func(x)[1], func_temp(x)[1])

    npt.assert_allclose(func_nograd(x), func(x)[0])
    npt.assert_allclose(func_temp_nograd(x), func_temp(x)[0])
Example #18
def test_duplicate_vars():
    with pytest.raises(ValueError) as err:
        with pm.Model():
            pm.Normal("a")
            pm.Normal("a")
    err.match("already exists")

    with pytest.raises(ValueError) as err:
        with pm.Model():
            pm.Normal("a")
            pm.Normal("a", transform=transforms.log)
    err.match("already exists")

    with pytest.raises(ValueError) as err:
        with pm.Model():
            a = pm.Normal("a")
            pm.Potential("a", a**2)
    err.match("already exists")

    with pytest.raises(ValueError) as err:
        with pm.Model():
            pm.Binomial("a", 10, 0.5)
            pm.Normal("a", transform=transforms.log)
    err.match("already exists")
Example #19
def another_simple_model():
    _model = models.simple_model()[1]
    with _model:
        pm.Potential("pot", at.ones((10, 10)))
    return _model
Example #20
    def sample(self, N: int, T: int = 1, burn: int = 1000) -> List:
        """
        Returns N samples from the distribution defined by applying update_func on the demonstrations and preferences
        observed thus far.

        :param N: number of samples to draw.
        :param T: if greater than 1, all samples except each T^{th} sample are discarded.
        :param burn: how many samples before the chain converges; these initial samples are discarded.
        :return: list of samples drawn.
        """
        x = tt.vector()
        x.tag.test_value = np.random.uniform(-1, 1, self.dim_features)

        # define update function
        start = time.time()
        if self.update_func == "approx":
            self.f = th.function(
                [x],
                tt.sum([
                    -tt.nnet.relu(
                        -self.beta_pref * tt.dot(self.phi_prefs[i], x))
                    for i in range(len(self.phi_prefs))
                ]) + tt.sum(self.beta_demo * tt.dot(self.phi_demos, x)))
        elif self.update_func == "pick_best":
            self.f = th.function(
                [x],
                tt.sum([
                    -tt.log(
                        tt.sum(
                            tt.exp(self.beta_pref *
                                   tt.dot(self.phi_prefs[i], x))))
                    for i in range(len(self.phi_prefs))
                ]) + tt.sum(self.beta_demo * tt.dot(self.phi_demos, x)))
        elif self.update_func == "rank":
            self.f = th.function(
                [x],
                tt.sum(  # summing across different queries
                    [
                        tt.sum(  # summing across different terms in PL-update
                            -tt.log([
                                tt.
                                sum(  # summing down different feature-differences in a single term in PL-update
                                    tt.exp(self.beta_pref * tt.dot(
                                        self.phi_prefs[i][j:, :] -
                                        self.phi_prefs[i][j], x)))
                                for j in range(self.n_query)
                            ])) for i in range(len(self.phi_prefs))
                    ]) + tt.sum(self.beta_demo * tt.dot(self.phi_demos, x)))
        print("Finished constructing sampling function in " +
              str(time.time() - start) + "seconds")

        # perform sampling
        x = mc.Uniform('x',
                       -np.ones(self.dim_features),
                       np.ones(self.dim_features),
                       value=np.zeros(self.dim_features))

        def sphere(x):
            if (x**2).sum() >= 1.:
                return -np.inf
            else:
                return self.f(x)

        p = mc.Potential(logp=sphere,
                         name='sphere',
                         parents={'x': x},
                         doc='Sphere potential',
                         verbose=0)
        chain = mc.MCMC([x])
        chain.use_step_method(mc.AdaptiveMetropolis,
                              x,
                              delay=burn,
                              cov=np.eye(self.dim_features) / 5000)
        chain.sample(N * T + burn, thin=T, burn=burn, verbose=-1)
        samples = x.trace()
        samples = np.array([x / np.linalg.norm(x) for x in samples])

        # print("Finished MCMC after drawing " + str(N*T+burn) + " samples")
        return samples
Example #21
import numpy as np
import matplotlib.pyplot as plt
import pymc as pm
import theano.tensor as tt  # needed for the tt.switch / tt.min calls below
k = 3
ndata = 500
spread = 5
centres = np.array([-spread, 0, spread])
v = np.random.randint(0, k, ndata)
data = centres[v] + np.random.randn(ndata)

l_res = plt.hist(data)

plt.show()

a = pm.constant(np.array([1., 1., 1.]))
p = pm.Dirichlet('p', a=a, shape=k)

p_min_potential = pm.Potential('p_min_potential',
                               tt.switch(tt.min(p) < .1, -np.inf, 0))

means = pm.Normal('means', mu=[0, 0, 0], sd=15, shape=k)

order_means_potential = pm.Potential(
    'order_means_potential',
    tt.switch(means[1] - means[0] < 0, -np.inf, 0) +
    tt.switch(means[2] - means[1] < 0, -np.inf, 0))
Example #22
@pm.potential
def psi_i(x_lo=x[i], x_hi=x[i + 1]):
    """A pair potential"""
    return -(x_lo - x_hi)**2


def psi_i_logp(x_lo=x[i], x_hi=x[i + 1]):
    return -(x_lo - x_hi)**2


psi_i = pm.Potential(logp=psi_i_logp,
                     name='psi_i',
                     parents={
                         'x_lo': x[i],
                         'x_hi': x[i + 1]
                     },
                     doc='A pair potential',
                     verbose=0,
                     cache_depth=2)


# Just made this up to test the bit of code below
def fun(value, a=1):
    return 2 * a + value


arguments = pm.DictContainer(dict(value=5, a=1))

# Here is the code from the paper.
L = pm.LazyFunction(fun, arguments)
Example #23
    print c,v
print "num_pathways:", len(pathways)
print "num_features:", len(features)
print "num_evidence:", len(evidence)
print "num_metfrag: ", len(metfrag_evidence)
rate_prior = 0.5

#eps = Beta('eps', 0.005, 1)
eps = 0.0001
ap =  {p : Gamma('p_' + p, rate_prior, 1) for p in pathways}
bmp = {p : {feat : Gamma('b_{' + p + ',' + feat + '}', ap[p],1) for feat in path_dict[p]} for p in pathways}
y_bmp = {}
g = {}

def logp_f(f, b, eps):
    if f in evidence:
        return math.log(1 - math.e ** (-1 * b) + eps)
    if f in metfrag_evidence:
        a_p = (1.0 / (1 - metfrag_evidence[f])) - 1
        return a_p * math.log(1 - math.e ** (-1 * b) + eps) - b
    return math.log(eps) - b
psi = {}
for feat, pathways in reverse_path_dict.iteritems():
    y_bmp[feat] = sum([bmp[pname][feat] for pname in pathways])
    g[feat] = Bernoulli('g_' + feat, 1 - math.e ** (-y_bmp[feat]))
    psi[feat] = pymc.Potential(logp = logp_f,
                               name = 'psi_' + feat,
                               parents = {'f' : feat, 'b' : y_bmp[feat], 'eps' : eps},
                               doc = 'hello world potential'
                              )