Example #1
    def test_advi(self):
        n = 1000
        sd0 = 2.
        mu0 = 4.
        sd = 3.
        mu = -5.

        data = sd * np.random.randn(n) + mu

        d = n / sd**2 + 1 / sd0**2
        mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            advi_fit = advi(n=1000, accurate_elbo=False, learning_rate=1e-1)
            np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)
            trace = sample_vp(advi_fit, 10000)

        np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
        np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)

        # Test for n < 10
        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            advi_fit = advi(n=5, accurate_elbo=False, learning_rate=1e-1)
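
A note on the closed-form values these tests check against: for a Normal(mu0, sd0) prior on mu and n observations with known likelihood sd, the posterior of mu is Normal with precision d = n / sd**2 + 1 / sd0**2 and precision-weighted mean mu_post. A minimal standalone sketch of that arithmetic (NumPy only; the seed and tolerance here are illustrative, not part of the tests):

import numpy as np

n, sd0, mu0, sd, mu = 1000, 2., 4., 3., -5.
data = sd * np.random.RandomState(0).randn(n) + mu

d = n / sd**2 + 1 / sd0**2                                # posterior precision
mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d  # precision-weighted mean

# With n = 1000 the posterior concentrates near the true mu = -5.
np.testing.assert_allclose(mu_post, mu, atol=0.5)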
Example #2
def test_advi_optimizer():
    n = 1000
    sd0 = 2.
    mu0 = 4.
    sd = 3.
    mu = -5.

    data = sd * np.random.RandomState(0).randn(n) + mu

    d = n / sd**2 + 1 / sd0**2
    mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

    with Model() as model:
        mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
        Normal('x', mu=mu_, sd=sd, observed=data)

    optimizer = adagrad_optimizer(learning_rate=0.1, epsilon=0.1)
    advi_fit = advi(model=model, n=1000, optimizer=optimizer, random_seed=1)

    np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)

    trace = sample_vp(advi_fit, 10000, model)

    np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
    np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
Example #3
    def test_check_discrete(self):
        with Model():
            switchpoint = DiscreteUniform(
                'switchpoint', lower=self.year.min(), upper=self.year.max(), testval=1900)

            # Priors for the pre- and post-switch rates of disasters
            early_rate = Exponential('early_rate', 1)
            late_rate = Exponential('late_rate', 1)

            # Allocate the appropriate Poisson rate to years before and after the switchpoint
            rate = tt.switch(switchpoint >= self.year, early_rate, late_rate)
            Poisson('disasters', rate, observed=self.disaster_data)

            # ADVI does not support discrete latent variables, so this should raise ValueError
            with self.assertRaises(ValueError):
                advi(n=10)
Example #4
def test_advi():
    n = 1000
    sd0 = 2.
    mu0 = 4.
    sd = 3.
    mu = -5.

    data = sd * np.random.RandomState(0).randn(n) + mu

    d = n / sd**2 + 1 / sd0**2
    mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

    with Model() as model: 
        mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
        x = Normal('x', mu=mu_, sd=sd, observed=data)

    advi_fit = advi(
        model=model, n=1000, accurate_elbo=False, learning_rate=1e-1, 
        random_seed=1)

    np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)

    trace = sample_vp(advi_fit, 10000, model)

    np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
    np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
Example #5
    def test_sample_vp(self):
        n_samples = 100
        xs = np.random.binomial(n=1, p=0.2, size=n_samples)
        with pm.Model():
            p = pm.Beta('p', alpha=1, beta=1)
            pm.Binomial('xs', n=1, p=p, observed=xs)
            v_params = advi(n=1000)
            trace = sample_vp(v_params, draws=1, hide_transformed=True)
            self.assertListEqual(trace.varnames, ['p'])
            trace = sample_vp(v_params, draws=1, hide_transformed=False)
            self.assertListEqual(sorted(trace.varnames), ['p', 'p_logodds_'])
Example #6
    def test_advi(self):
        n = 1000
        sd0 = 2.
        mu0 = 4.
        sd = 3.
        mu = -5.

        data = sd * np.random.randn(n) + mu

        d = n / sd**2 + 1 / sd0**2
        mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            advi_fit = advi(n=1000, accurate_elbo=False, learning_rate=1e-1)
            np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)
            trace = sample_vp(advi_fit, 10000)

        np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
        np.testing.assert_allclose(np.std(trace['mu']),
                                   np.sqrt(1. / d),
                                   rtol=0.4)

        h = self.handler
        self.assertTrue(h.matches(msg="converged"))

        # Test for n < 10
        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            advi_fit = advi(n=5, accurate_elbo=False, learning_rate=1e-1)

        # A too-large learning rate should produce NaNs and raise FloatingPointError
        with self.assertRaises(FloatingPointError):
            with Model():
                mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
                Normal('x', mu=mu_, sd=sd, observed=data)
                advi_fit = advi(n=1000,
                                accurate_elbo=False,
                                learning_rate=1e10)
Example #7
def build_model():
    y = shared(np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32))
    with Model() as arma_model:
        sigma = HalfCauchy('sigma', 5)
        theta = Normal('theta', 0, sd=2)
        phi = Normal('phi', 0, sd=2)
        mu = Normal('mu', 0, sd=10)

        err0 = y[0] - (mu + phi * mu)

        def calc_next(last_y, this_y, err, mu, phi, theta):
            nu_t = mu + phi * last_y + theta * err
            return this_y - nu_t

        err, _ = scan(fn=calc_next,
                      sequences=dict(input=y, taps=[-1, 0]),
                      outputs_info=[err0],
                      non_sequences=[mu, phi, theta])

        Potential('like', Normal.dist(0, sd=sigma).logp(err))
        variational.advi(n=2000)
    return arma_model
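
For reference, the error recursion that the scan above implements is err[t] = y[t] - (mu + phi * y[t-1] + theta * err[t-1]), seeded with err0. A plain-NumPy unrolling of the same loop (the parameter values here are placeholders for illustration, not draws from the model):

import numpy as np

y = np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32)
mu, phi, theta = 10.0, 0.5, 0.1   # placeholder parameter values

errs = [y[0] - (mu + phi * mu)]   # err0 seeds the recursion
for t in range(1, len(y)):
    nu_t = mu + phi * y[t - 1] + theta * errs[-1]
    errs.append(y[t] - nu_t)
# errs[1:] corresponds to the scan output err above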
Example #8
    def test_advi(self):
        n = 1000
        sd0 = 2.
        mu0 = 4.
        sd = 3.
        mu = -5.

        data = sd * np.random.randn(n) + mu

        d = n / sd**2 + 1 / sd0**2
        mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            advi_fit = advi(n=1000, accurate_elbo=False, learning_rate=1e-1)
            np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)
            trace = sample_vp(advi_fit, 10000)

        np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
        np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)

        h = self.handler
        assert h.matches(msg="converged")

        # Test for n < 10
        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            advi_fit = advi(n=5, accurate_elbo=False, learning_rate=1e-1)

        # A too-large learning rate should produce NaNs and raise FloatingPointError
        with pytest.raises(FloatingPointError):
            with Model():
                mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
                Normal('x', mu=mu_, sd=sd, observed=data)
                advi_fit = advi(n=1000, accurate_elbo=False, learning_rate=1e10)
Example #9
    def fit(self, n_steps=30000):
        """
        Creates a Bayesian Estimation model for replicate measurements of
        treatment(s) vs. control.

        Parameters
        ----------
        n_steps : int
            The number of steps to run ADVI.
        """

        sample_names = set(self.data[self.sample_col].values)
        sample_names.remove(self.baseline_name)

        with Model() as model:
            # Hyperpriors
            upper = Exponential('upper', lam=0.05)
            nu = Exponential('nu_minus_one', 1 / 29.) + 1

            # "fold", which is the estimated fold change.
            fold = Uniform('fold',
                           lower=1E-10,
                           upper=upper,
                           shape=len(sample_names))

            # Assume the data have heteroskedastic (i.e. per-sample) error,
            # with the error scales drawn from a common HalfCauchy distribution.
            sigma = HalfCauchy('sigma', beta=1, shape=len(sample_names))

            # Model prediction
            mu = fold[self.data['indices']]
            sig = sigma[self.data['indices']]

            # Data likelihood
            like = StudentT('like',
                            nu=nu,
                            mu=mu,
                            sd=sig**-2,
                            observed=self.data[self.output_col])

        self.model = model

        with model:
            params = advi(n=n_steps)
            trace = sample_vp(params, draws=2000)

        self.trace = trace
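
The fold[self.data['indices']] lookup above relies on integer fancy indexing: an index column maps each measurement row to its sample's parameter, so mu and sig end up with one entry per row. A minimal NumPy sketch of the same pattern (the fold and indices values are made up for illustration, not model output):

import numpy as np

fold = np.array([0.5, 2.0, 1.3])         # one fold-change per sample
indices = np.array([0, 0, 1, 2, 2, 2])   # sample index for each measurement row
mu = fold[indices]                       # per-row prediction: [0.5 0.5 2.0 1.3 1.3 1.3]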
Example #10
def test_sample_vp():
    n_samples = 100

    rng = np.random.RandomState(0)
    xs = rng.binomial(n=1, p=0.2, size=n_samples)

    with pm.Model() as model:
        p = pm.Beta('p', alpha=1, beta=1)
        pm.Binomial('xs', n=1, p=p, observed=xs)
        v_params = advi(n=1000)

    with model:
        trace = sample_vp(v_params, hide_transformed=True)
    assert (set(trace.varnames) == set('p'))

    with model:
        trace = sample_vp(v_params, hide_transformed=False)
    assert (set(trace.varnames) == set(('p', 'p_logodds_')))
Example #11
    def test_advi_optimizer(self):
        n = 1000
        sd0 = 2.
        mu0 = 4.
        sd = 3.
        mu = -5.

        data = sd * np.random.randn(n) + mu

        d = n / sd**2 + 1 / sd0**2
        mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            Normal('x', mu=mu_, sd=sd, observed=data)
            optimizer = adagrad_optimizer(learning_rate=0.1, epsilon=0.1)
            advi_fit = advi(n=1000, optimizer=optimizer)
            np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)
            trace = sample_vp(advi_fit, 10000)

        np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
        np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
Example #12
    err0 = y[0] - (mu + phi*mu)

    def calc_next(last_y, this_y, err, mu, phi, theta):
        nu_t = mu + phi*last_y + theta*err
        return this_y - nu_t

    err, _ = scan(fn=calc_next,
                  sequences=dict(input=y, taps=[-1, 0]),
                  outputs_info=[err0],
                  non_sequences=[mu, phi, theta])

    like = Potential('like', Normal.dist(0, sd=sigma).logp(err))

with arma_model:
    mu, sds, elbo = variational.advi(n=2000)


def run(n=1000):
    if n == "short":
        n = 50
    with arma_model:
        trace = sample(n)

    burn = n // 10

    traceplot(trace[burn:])
    plots.summary(trace[burn:])