Example #1
 def test_more_than_one_glm_is_ok(self):
     with Model():
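         # giving each GLM a distinct name keeps their variables from clashing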
         glm.glm('y ~ x',
                 self.data_logistic,
                 family=glm.families.Binomial(link=glm.families.logit),
                 name='glm1')
         glm.glm('y ~ x',
                 self.data_logistic,
                 family=glm.families.Binomial(link=glm.families.logit),
                 name='glm2')
Example #2
 def test_prod_multid(self):
     X = np.linspace(0, 1, 30).reshape(10, 3)
     A = np.array([1, 2, 3])
     b = 10
     with Model() as model:
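         # mean objects support arithmetic; this product is Linear * Constant(2) * Constant(2)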
         mean1 = gp.mean.Linear(coeffs=A, intercept=b)
         mean2 = gp.mean.Constant(2)
         mean = mean1 * mean2 * mean2
     M = theano.function([], mean(X))()
     npt.assert_allclose(M[1], 10.8965 * 2 * 2, atol=1e-3)
Example #3
 def test_func_args(self):
     X = np.linspace(0, 1, 10)[:, None]
     Y = np.random.randn(10, 1)
     with Model() as model:
         # a mean function passed as cov_func should be rejected
         with pytest.raises(ValueError):
             random_test = gp.GP('random_test', cov_func=gp.mean.Zero(), observed={'X':X, 'Y':Y})
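         # a covariance function passed as mean_func is likewise rejected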
         with pytest.raises(ValueError):
             random_test = gp.GP('random_test', mean_func=gp.cov.Matern32(1, 1),
                                     cov_func=gp.cov.Matern32(1, 1), observed={'X':X, 'Y':Y})
Example #4
 def test_1d(self):
     X = np.linspace(0, 1, 10)[:, None]
     with Model() as model:
         cov_m52 = gp.cov.Matern52(1, 0.2)
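         # WarpedInput transforms X through warp_func before applying the Matern52 kernel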
         cov = gp.cov.WarpedInput(1,
                                  warp_func=self.warp_func,
                                  args=(1, 10, 1),
                                  cov_func=cov_m52)
     K = theano.function([], cov.K(X))()
     self.assertAlmostEqual(K[0, 1], 0.79593, 4)
Example #5
def test_missing_pandas():
    data = pd.DataFrame([1, 2, numpy.nan, 4, numpy.nan])
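    # NaN entries mark missing observations that PyMC3 will impute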
    with Model() as model:
        x = Normal('x', 1, 1)
        Normal('y', x, 1, observed=data)

    y_missing, = model.missing_values
    assert y_missing.tag.test_value.shape == (2, )

    model.logp(model.test_point)
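A minimal follow-on sketch (not from the original test; it assumes the same imports as the examples above, i.e. pymc3's Model, Normal and sample plus pandas/numpy) showing what sampling does with the imputed cells; 'y_missing' is the name PyMC3 assigns automatically:

def sample_imputed_values():
    # NaN cells become latent variables named '<var>_missing'
    data = pd.DataFrame([1, 2, numpy.nan, 4, numpy.nan])
    with Model() as model:
        x = Normal('x', 1, 1)
        Normal('y', x, 1, observed=data)
        trace = sample(200, tune=100, progressbar=False)
    # one column of posterior draws per missing entry
    return trace['y_missing']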
Example #6
    def test_glm(self):
        with Model() as model:
            GLM.from_formula('y ~ x', self.data_linear)
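            # the formula creates Intercept, x and sd free variables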
            step = Slice(model.vars)
            trace = sample(500, step=step, tune=0, progressbar=False,
                           random_seed=self.random_seed)

            assert round(abs(np.mean(trace['Intercept'])-self.intercept), 1) == 0
            assert round(abs(np.mean(trace['x'])-self.slope), 1) == 0
            assert round(abs(np.mean(trace['sd'])-self.sd), 1) == 0
Example #7
def simple_2model():
    mu = -2.1
    tau = 1.3
    p = .4
    with Model() as model:
        x = pm.Normal('x', mu, tau, testval=.1)
        logx = pm.Deterministic('logx', log(x))
        y = pm.Bernoulli('y', p)

    return model.test_point, model
Example #8
    def test_glm_link_func(self):
        with Model() as model:
            GLM.from_formula(
                "y ~ x", self.data_logistic, family=families.Binomial(link=families.logit)
            )
            step = Slice(model.vars)
            trace = sample(1000, step=step, tune=0, progressbar=False, random_seed=self.random_seed)

            assert round(abs(np.mean(trace["Intercept"]) - self.intercept), 1) == 0
            assert round(abs(np.mean(trace["x"]) - self.slope), 1) == 0
Example #9
 def test_more_than_one_glm_is_ok(self):
     with Model():
         GLM.from_formula('y ~ x',
                          self.data_logistic,
                          family=families.Binomial(link=families.logit),
                          name='glm1')
         GLM.from_formula('y ~ x',
                          self.data_logistic,
                          family=families.Binomial(link=families.logit),
                          name='glm2')
Example #10
def check_dist(dist_case, test_cases, shape=None):
    dist, dist_kwargs = dist_case
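    # build the RV inside a fresh model context, then verify each (size, expected) case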
    with Model():
        if shape is None:
            rv = dist(dist.__name__, transform=None, **dist_kwargs)
        else:
            rv = dist(dist.__name__, shape=shape, transform=None,
                      **dist_kwargs)
        for size, expected in test_cases:
            check_shape(rv, size=size, expected=expected)
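A hypothetical invocation of the helper above (the distribution and the expected shapes are assumed for illustration; check_shape is taken to compare draws of the given size against `expected`):

check_dist((Normal, {'mu': 0.0, 'sd': 1.0}),
           test_cases=[(None, tuple()), (5, (5,))])
check_dist((Normal, {'mu': 0.0, 'sd': 1.0}),
           test_cases=[(None, (2,)), (5, (5, 2))],
           shape=2)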
Example #11
def test_missing():
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
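    # entries equal to -1 are masked and treated as missing values to impute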
    with Model() as model:
        x = Normal('x', 1, 1)
        Normal('y', x, 1, observed=data)

    y_missing, = model.missing_values
    assert y_missing.tag.test_value.shape == (2, )

    model.logp(model.test_point)
Example #12
 def test_1d(self):
     X = np.linspace(0, 2, 10)[:, None]
     def tanh_func(x, x1, x2, w, x0):
         return (x1 + x2) / 2.0 - (x1 - x2) / 2.0 * tt.tanh((x - x0) / w)
     with Model() as model:
         cov = gp.cov.Gibbs(1, tanh_func, args=(0.05, 0.6, 0.4, 1.0))
     K = theano.function([], cov(X))()
     npt.assert_allclose(K[2, 3], 0.136683, atol=1e-4)
     K = theano.function([], cov(X,X))()
     npt.assert_allclose(K[2, 3], 0.136683, atol=1e-4)
Example #13
    def test_thread_safety(self):
        """ Regression test for issue #1552: Thread safety of model context manager

        This test creates two threads that attempt to construct two
        unrelated models at the same time.
        For repeatable testing, the two threads are synchronised such
        that thread A enters the context manager first, then B,
        then A attempts to declare a variable while B is still in the context manager.
        """
        aInCtxt, bInCtxt, aDone = [threading.Event() for _ in range(3)]
        modelA = Model()
        modelB = Model()

        def make_model_a():
            with modelA:
                aInCtxt.set()
                bInCtxt.wait()
                Normal('a', 0, 1)
            aDone.set()

        def make_model_b():
            aInCtxt.wait()
            with modelB:
                bInCtxt.set()
                aDone.wait()
                Normal('b', 0, 1)

        threadA = threading.Thread(target=make_model_a)
        threadB = threading.Thread(target=make_model_b)
        threadA.start()
        threadB.start()
        threadA.join()
        threadB.join()
        # now let's see which model got which variable
        # prior to #1555, the variables would be swapped:
        # - B enters its model context after A, but before a is declared -> a goes into B
        # - A leaves its model context before B attempts to declare b. A's context manager
        #   takes B from the stack, such that b ends up in model A
        assert (
            list(modelA.named_vars),
            list(modelB.named_vars),
        ) == (['a'], ['b'])
Example #14
 def test_1d(self):
     X = np.linspace(0, 1, 10)[:, None]
     def warp_func(x, a, b, c):
         return x + (a * tt.tanh(b * (x - c)))
     with Model() as model:
         cov_m52 = gp.cov.Matern52(1, 0.2)
         cov = gp.cov.WarpedInput(1, warp_func=warp_func, args=(1, 10, 1), cov_func=cov_m52)
     K = theano.function([], cov(X))()
     npt.assert_allclose(K[0, 1], 0.79593, atol=1e-3)
     K = theano.function([], cov(X,X))()
     npt.assert_allclose(K[0, 1], 0.79593, atol=1e-3)
Example #15
    def test_glm_link_func2(self):
        with Model() as model:
            GLM.from_formula(
                "y ~ x",
                self.data_logistic2,
                family=families.Binomial(priors={"n": self.data_logistic2["n"]}),
            )
            trace = sample(1000, progressbar=False, init="adapt_diag", random_seed=self.random_seed)

            assert round(abs(np.mean(trace["Intercept"]) - self.intercept), 1) == 0
            assert round(abs(np.mean(trace["x"]) - self.slope), 1) == 0
Example #16
    def test_shape(self):
        spec = DistSpec(Normal, mu=0, sd=1)
        spec2 = DistSpec(Normal, mu=0, sd=DistSpec(Lognormal, 0, 1))
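        # DistSpecs nest: var2 draws its sd from a Lognormal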

        with Model('layer'):
            var = spec((100, 100), 'var')
            var2 = spec2((100, 100), 'var2')
            self.assertEqual(var.init_value.shape, (100, 100))
            self.assertTrue(var.name.endswith('var'))
            self.assertEqual(var2.init_value.shape, (100, 100))
            self.assertTrue(var2.name.endswith('var2'))
Example #17
 def test_sample(self):
     X = np.linspace(0, 1, 100)[:, None]
     Y = np.random.randn(100, 1)
     with Model() as model:
         M = gp.mean.Zero()
         l = Uniform('l', 0, 5)
         K = gp.cov.Matern32(1, l)
         sigma = Uniform('sigma', 0, 10)
         # make a Gaussian model
         random_test = gp.GP('random_test', mean_func=M, cov_func=K, sigma=sigma, observed={'X':X, 'Y':Y})
         tr = sample(500, init=None, progressbar=False, random_seed=self.random_seed)
Example #18
 def test_symadd_cov(self):
     X = np.linspace(0, 1, 10)[:, None]
     with Model() as model:
         cov1 = gp.cov.ExpQuad(1, 0.1)
         cov2 = gp.cov.ExpQuad(1, 0.1)
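         # adding identical kernels doubles every covariance entry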
         cov = cov1 + cov2
     K = theano.function([], cov(X))()
     npt.assert_allclose(K[0, 1], 2 * 0.53940, atol=1e-3)
     # check diagonal
     Kd = theano.function([], cov(X, diag=True))()
     npt.assert_allclose(np.diag(K), Kd, atol=1e-5)
Example #19
 def test_1d(self):
     X = np.linspace(0, 1, 10)[:, None]
     with Model() as model:
         cov = gp.cov.Polynomial(1, 0.5, 2, 0)
     K = theano.function([], cov(X))()
     npt.assert_allclose(K[0, 1], 0.03780, atol=1e-3)
     K = theano.function([], cov(X, X))()
     npt.assert_allclose(K[0, 1], 0.03780, atol=1e-3)
     # check diagonal
     Kd = theano.function([], cov(X, diag=True))()
     npt.assert_allclose(np.diag(K), Kd, atol=1e-5)
Example #20
def simple_arbitrary_det():
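    # wrap a pure-Python identity function as a Theano op via as_op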
    @as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])
    def arbitrary_det(value):
        return value

    with Model() as model:
        a = Normal('a')
        b = arbitrary_det(a)
        Normal('obs', mu=b.astype('float64'), observed=np.array([1, 3, 5]))

    return model.test_point, model
Example #21
    def test_glm_from_formula(self):
        with Model() as model:
            NAME = 'glm'
            GLM.from_formula('y ~ x', self.data_linear, name=NAME)
            start = find_MAP()
            step = Slice(model.vars)
            trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)

            self.assertAlmostEqual(np.mean(trace['%s_Intercept' % NAME]), self.intercept, 1)
            self.assertAlmostEqual(np.mean(trace['%s_x' % NAME]), self.slope, 1)
            self.assertAlmostEqual(np.mean(trace['%s_sd' % NAME]), self.sd, 1)
Example #22
 def test_1d(self):
     X = np.linspace(0, 1, 10)[:, None]
     with Model() as model:
         cov = gp.cov.Cosine(1, 0.1)
     K = theano.function([], cov(X))()
     npt.assert_allclose(K[0, 1], -0.93969, atol=1e-3)
     K = theano.function([], cov(X, X))()
     npt.assert_allclose(K[0, 1], -0.93969, atol=1e-3)
     # check diagonal
     Kd = theano.function([], cov(X, diag=True))()
     npt.assert_allclose(np.diag(K), Kd, atol=1e-5)
Example #23
    def test_advi(self):
        n = 1000
        sd0 = 2.
        mu0 = 4.
        sd = 3.
        mu = -5.

        data = sd * np.random.randn(n) + mu

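        # exact conjugate-normal posterior for mu: precision d, mean mu_post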
        d = n / sd**2 + 1 / sd0**2
        mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            advi_fit = advi(n=1000, accurate_elbo=False, learning_rate=1e-1)
            np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)
            trace = sample_vp(advi_fit, 10000)

        np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
        np.testing.assert_allclose(np.std(trace['mu']),
                                   np.sqrt(1. / d),
                                   rtol=0.4)

        h = self.handler
        self.assertTrue(h.matches(msg="converged"))

        # Test for n < 10
        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            advi_fit = advi(n=5, accurate_elbo=False, learning_rate=1e-1)

        # Check to raise NaN with a large learning coefficient
        with self.assertRaises(FloatingPointError):
            with Model():
                mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
                Normal('x', mu=mu_, sd=sd, observed=data)
                advi_fit = advi(n=1000,
                                accurate_elbo=False,
                                learning_rate=1e10)
Example #24
    def test_linear_component_from_formula(self):
        with Model() as model:
            lm = LinearComponent.from_formula('y ~ x', self.data_linear)
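            # lm.y_est is the linear predictor built from the formula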
            sigma = Uniform('sigma', 0, 20)
            Normal('y_obs', mu=lm.y_est, sd=sigma, observed=self.y_linear)
            start = find_MAP(vars=[sigma])
            step = Slice(model.vars)
            trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)

            self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)
            self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)
            self.assertAlmostEqual(np.mean(trace['sigma']), self.sd, 1)
Example #25
def test_allinmodel():
    model1 = Model()
    model2 = Model()
    with model1:
        x1 = Normal('x1', mu=0, sigma=1)
        y1 = Normal('y1', mu=0, sigma=1)
    with model2:
        x2 = Normal('x2', mu=0, sigma=1)
        y2 = Normal('y2', mu=0, sigma=1)

    starting.allinmodel([x1, y1], model1)
    starting.allinmodel([x1], model1)
    with raises(ValueError,
                match=r"Some variables not in the model: \['x2', 'y2'\]"):
        starting.allinmodel([x2, y2], model1)
    with raises(ValueError,
                match=r"Some variables not in the model: \['x2'\]"):
        starting.allinmodel([x2, y1], model1)
    with raises(ValueError,
                match=r"Some variables not in the model: \['x2'\]"):
        starting.allinmodel([x2], model1)
Example #26
 def test_multiops(self):
     X = np.linspace(0, 1, 3)[:, None]
     M = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]])
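     # the same kernel expression built in two different orders must agree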
     with Model() as model:
         cov1 = 3 + gp.cov.ExpQuad(1, 0.1) + M * gp.cov.ExpQuad(1, 0.1) * M * gp.cov.ExpQuad(1, 0.1)
         cov2 = gp.cov.ExpQuad(1, 0.1) * M * gp.cov.ExpQuad(1, 0.1) * M + gp.cov.ExpQuad(1, 0.1) + 3
     K1 = theano.function([], cov1(X))()
     K2 = theano.function([], cov2(X))()
     self.assertTrue(np.allclose(K1, K2))
Example #27
    def test_mixture_list_of_poissons(self):
        with Model() as model:
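            # two-component Poisson mixture; weights w get a flat Dirichlet prior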
            w = Dirichlet("w", floatX(np.ones_like(self.pois_w)), shape=self.pois_w.shape)
            mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
            Mixture("x_obs", w, [Poisson.dist(mu[0]), Poisson.dist(mu[1])], observed=self.pois_x)
            step = Metropolis()
            trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)

        assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.pois_w), rtol=0.1, atol=0.1)
        assert_allclose(
            np.sort(trace["mu"].mean(axis=0)), np.sort(self.pois_mu), rtol=0.1, atol=0.1
        )
Example #28
def simple_model(simple_model_data):
    with Model() as model:
        mu_ = Normal('mu',
                     mu=simple_model_data['mu0'],
                     sd=simple_model_data['sd0'],
                     testval=0)
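        # total_size declares the full dataset size so the likelihood is scaled accordingly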
        Normal('x',
               mu=mu_,
               sd=simple_model_data['sd'],
               observed=simple_model_data['data'],
               total_size=simple_model_data['n'])
    return model
Example #29
def test_internal_missing_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)
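        # each partially observed variable triggers an ImputationWarning when built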
        with pytest.warns(ImputationWarning):
            theta1 = Normal('theta1', mu=2, observed=obs1)
        with pytest.warns(ImputationWarning):
            theta2 = Normal('theta2', mu=theta1, observed=obs2)

        prior_trace = sample_prior_predictive()
        assert set(['theta1', 'theta2']) <= set(prior_trace.keys())
        sample()
Example #30
def simple_arbitrary_det():
    scalar_type = tt.dscalar if theano.config.floatX == "float64" else tt.fscalar

    @as_op(itypes=[scalar_type], otypes=[scalar_type])
    def arbitrary_det(value):
        return value

    with Model() as model:
        a = Normal("a")
        b = arbitrary_det(a)
        Normal("obs", mu=b.astype("float64"), observed=floatX_array([1, 3, 5]))

    return model.test_point, model
import theano
from theano import tensor as tt
from theano import function

from pymc3 import Model, Normal, HalfNormal, Uniform

# ReferencePeriod, ShiftedBinTimes, FlatBinTimes, ProfileData and useToAs
# are defined earlier in the original script (not shown here)
TRP = theano.shared(ReferencePeriod)
TBinTimes = theano.shared(ShiftedBinTimes)
# TInterpBasis = theano.shared(InterpBasis)
# TInterpolatedTime = theano.shared(InterpolatedTime)

TFlatTimes = theano.shared(FlatBinTimes)
FlatData = ProfileData.flatten()[:useToAs * 1024]

basic_model = Model()

with basic_model:
    # Priors for unknown model parameters
    amplitude = Normal('amplitude', mu=0, sd=10000, shape=useToAs)
    offset = Normal('offset', mu=0, sd=10000, shape=useToAs)
    noise = HalfNormal('noise', sd=10000, shape=useToAs)
    phase = Uniform('phase', lower=0, upper=ReferencePeriod)

    # parameters that define a two-Gaussian model (Savex comes from earlier in the script)
    gsep = Savex[1] * ReferencePeriod / 1024
    g1width = Savex[2] * ReferencePeriod / 1024
    g2width = Savex[3] * ReferencePeriod / 1024
    g2amp = Savex[4]
nObs = T.sum()
S_start = S_start[0:nObs]
obs_jumps = obs_jumps[0:nObs]
X_start = X_start[0:nObs]
O = O[0:nObs]

nObs = S_start.shape[0]
N = T.shape[0]  # Number of patients
M = pi_start.shape[0]  # Number of hidden states
K = Z_start.shape[0]  # Number of comorbidities
D = Z_start.shape[1]  # Number of claims
Dd = 16  # Maximum number of claims that can occur at once

model = Model()
with model:
    # Fails: pi = Dirichlet('pi', a=as_tensor_variable([0.147026, 0.102571, 0.239819, 0.188710, 0.267137, 0.054738]), shape=M, testval=np.ones(M) / float(M))
    pi = Dirichlet("pi", a=as_tensor_variable(pi_start.copy()), shape=M)
    pi_min_potential = Potential("pi_min_potential", TT.switch(TT.min(pi) < 0.001, -np.inf, 0))

    Q = DiscreteObsMJP_unif_prior("Q", M=M, lower=0.0, upper=1.0, shape=(M, M))

    # S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs), testval=np.ones(nObs,dtype='int32'))
    S = DiscreteObsMJP("S", pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs))

    # B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))
    # B = Beta('B', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))
    B0 = Beta("B0", alpha=1.0, beta=1.0, shape=(K, M))
    B = Beta("B", alpha=1.0, beta=1.0, shape=(K, M))