Example #1
    def test_mixture_list_of_normals(self):
        with Model() as model:
            w = Dirichlet('w', floatX(np.ones_like(self.norm_w)))
            mu = Normal('mu', 0., 10., shape=self.norm_w.size)
            tau = Gamma('tau', 1., 1., shape=self.norm_w.size)
            Mixture('x_obs',
                    w, [
                        Normal.dist(mu[0], tau=tau[0]),
                        Normal.dist(mu[1], tau=tau[1])
                    ],
                    observed=self.norm_x)
            step = Metropolis()
            trace = sample(5000,
                           step,
                           random_seed=self.random_seed,
                           progressbar=False,
                           chains=1)

        assert_allclose([
            np.sort(trace['w'].mean(axis=0)),
            np.sort(trace['mu'].mean(axis=0))
        ], [np.sort(self.norm_w), np.sort(self.norm_mu)],
                        rtol=0.1,
                        atol=0.1)
        assert_allclose(np.sort(trace['mu'].mean(axis=0)),
                        np.sort(self.norm_mu),
                        rtol=0.1,
                        atol=0.1)
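
Outside the test class, the core construction these first tests exercise is short. A minimal sketch with made-up data (`norm_x` here is synthetic; PyMC3 3.x on Theano assumed):

import numpy as np
from pymc3 import Model, Normal, Gamma, Dirichlet, Mixture, Metropolis, sample
from pymc3.theanof import floatX

# synthetic two-component data, purely illustrative
norm_x = np.random.randn(200) + np.random.choice([0.0, 5.0], size=200)

with Model():
    w = Dirichlet("w", floatX(np.ones(2)))
    mu = Normal("mu", 0.0, 10.0, shape=2)
    tau = Gamma("tau", 1.0, 1.0, shape=2)
    # mixture built from an explicit list of component distributions
    Mixture("x_obs", w,
            [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
            observed=norm_x)
    trace = sample(1000, step=Metropolis(), chains=1, progressbar=False)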
Example #2
    def test_mixture_list_of_normals(self):
        with Model() as model:
            w = Dirichlet("w",
                          floatX(np.ones_like(self.norm_w)),
                          shape=self.norm_w.size)
            mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
            tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
            Mixture(
                "x_obs",
                w,
                [
                    Normal.dist(mu[0], tau=tau[0]),
                    Normal.dist(mu[1], tau=tau[1])
                ],
                observed=self.norm_x,
            )
            step = Metropolis()
            trace = sample(5000,
                           step,
                           random_seed=self.random_seed,
                           progressbar=False,
                           chains=1)

        assert_allclose(np.sort(trace["w"].mean(axis=0)),
                        np.sort(self.norm_w),
                        rtol=0.1,
                        atol=0.1)
        assert_allclose(np.sort(trace["mu"].mean(axis=0)),
                        np.sort(self.norm_mu),
                        rtol=0.1,
                        atol=0.1)
Example #3
    def test_dimensions(self):
        a1 = Normal.dist(mu=0, sigma=1)
        a2 = Normal.dist(mu=10, sigma=1)
        mix = Mixture.dist(w=np.r_[0.5, 0.5], comp_dists=[a1, a2])

        assert mix.mode.ndim == 0
        assert mix.logp(0.0).ndim == 0

        value = np.r_[0.0, 1.0, 2.0]
        assert mix.logp(value).ndim == 1
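
A minimal sketch of the same check outside the test class (PyMC3 3.x on Theano assumed); `logp` returns a Theano tensor, so `.eval()` is used to materialize the values:

import numpy as np
from pymc3 import Normal, Mixture

a1 = Normal.dist(mu=0, sigma=1)
a2 = Normal.dist(mu=10, sigma=1)
mix = Mixture.dist(w=np.r_[0.5, 0.5], comp_dists=[a1, a2])

print(mix.logp(0.0).eval())                    # scalar log-density, ndim == 0
print(mix.logp(np.r_[0.0, 1.0, 2.0]).eval())   # elementwise, ndim == 1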
Example #4
    def logp(self, observed):
        """Calculated the log likelihood of the observed streamflow given
        simulated streamflow from GR4J"""

        simulated = simulate_streamflow(self.precipitation,
                                        self.evaporation,
                                        self.S0,
                                        self.Pr0,
                                        self.R0,
                                        self.x1,
                                        self.x2,
                                        self.x3,
                                        self.x4,
                                        self.x4_limit,
                                        truncate_gradient=self.truncate,
                                        tv_x1=self.tv_x1)

        # Restrict the likelihood calculation to a subsample of the
        # len(observed) points, which can speed up the computation.
        if self.subsample_index is not None:
            observed = observed[self.subsample_index]
            simulated = simulated[self.subsample_index]

        density = Normal.dist(mu=simulated, sd=self.sd)
        return tt.sum(density.logp(observed))
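
The surrounding model class is not shown here. As a hedged sketch of how a custom log likelihood like this is typically wired into a model (the names `observed_flow` and `level` are illustrative, not from the original), the same `Normal.dist(...).logp(...)` sum can be attached through a `Potential`:

import numpy as np
import theano.tensor as tt
import pymc3 as pm

observed_flow = np.random.rand(50)  # hypothetical observed streamflow

with pm.Model():
    sd = pm.HalfNormal("sd", 1.0)
    # stand-in for simulate_streamflow(...): any deterministic tensor of
    # simulated flows would appear here
    simulated = tt.ones(50) * pm.Normal("level", 0.0, 1.0)
    pm.Potential("like",
                 tt.sum(pm.Normal.dist(mu=simulated, sd=sd).logp(observed_flow)))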
Example #5
    def test_mixture_list_of_normals(self):
        with Model() as model:
            w = Dirichlet('w', np.ones_like(self.norm_w))
            mu = Normal('mu', 0., 10., shape=self.norm_w.size)
            tau = Gamma('tau', 1., 1., shape=self.norm_w.size)
            Mixture('x_obs', w,
                    [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
                    observed=self.norm_x)
            step = Metropolis()
            trace = sample(5000, step, random_seed=self.random_seed, progressbar=False)

        assert_allclose(np.sort(trace['w'].mean(axis=0)),
                        np.sort(self.norm_w),
                        rtol=0.1, atol=0.1)
        assert_allclose(np.sort(trace['mu'].mean(axis=0)),
                        np.sort(self.norm_mu),
                        rtol=0.1, atol=0.1)
Example #6
    def logp(self, x):
        tau = self.tau
        sd = self.sd
        A = self.A
        B = self.B
        u = self.u
        init = self.init

        # x[0,:] = init
        x_im1 = x[:-1]
        x_i = x[1:]
        u_im1 = u[:-1]
        innov_like = Normal.dist(mu=T.dot(A, x_im1.T) + T.dot(B, u_im1.T),
                                 tau=tau,
                                 sd=sd).logp(x_i.T)
        return T.sum(init.logp(x[0])) + T.sum(innov_like)
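
For the scalar AR(1) special case, the same innovation likelihood collapses to one line. A sketch with illustrative values (the series `x` and coefficient `a` are made up here; `A` and `B` reduce to scalars):

import numpy as np
import theano.tensor as T
from pymc3 import Normal

x = np.cumsum(np.random.randn(100))   # hypothetical observed series
a = 0.9                               # illustrative AR coefficient
# each x[i] is Normal around a * x[i-1]
innov_like = Normal.dist(mu=a * x[:-1], sd=1.0).logp(x[1:])
print(T.sum(innov_like).eval())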
Example #7
    def test_normal_mixture_nd(self):
        nd, ncomp = 3, 5

        with Model() as model0:
            mus = Normal('mus', shape=(nd, ncomp))
            taus = Gamma('taus', alpha=1, beta=1, shape=(nd, ncomp))
            ws = Dirichlet('ws', np.ones(ncomp))
            mixture0 = NormalMixture('m', w=ws, mu=mus, tau=taus, shape=nd)

        with Model() as model1:
            mus = Normal('mus', shape=(nd, ncomp))
            taus = Gamma('taus', alpha=1, beta=1, shape=(nd, ncomp))
            ws = Dirichlet('ws', np.ones(ncomp))
            comp_dist = [Normal.dist(mu=mus[:, i], tau=taus[:, i])
                         for i in range(ncomp)]
            mixture1 = Mixture('m', w=ws, comp_dists=comp_dist, shape=nd)

        testpoint = model0.test_point
        testpoint['mus'] = np.random.randn(nd, ncomp)
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
Example #8
def build_model():
    y = shared(np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32))
    with Model() as arma_model:
        sigma = HalfCauchy('sigma', 5)
        theta = Normal('theta', 0, sd=2)
        phi = Normal('phi', 0, sd=2)
        mu = Normal('mu', 0, sd=10)

        err0 = y[0] - (mu + phi * mu)

        def calc_next(last_y, this_y, err, mu, phi, theta):
            nu_t = mu + phi * last_y + theta * err
            return this_y - nu_t

        err, _ = scan(fn=calc_next,
                      sequences=dict(input=y, taps=[-1, 0]),
                      outputs_info=[err0],
                      non_sequences=[mu, phi, theta])

        Potential('like', Normal.dist(0, sd=sigma).logp(err))
        variational.advi(n=2000)
    return arma_model
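
A hedged usage sketch (assuming `sample` is imported from pymc3 alongside the names used in `build_model`): the returned model can be re-entered as a context for sampling.

arma_model = build_model()
with arma_model:
    trace = sample(1000, progressbar=False)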
Example #9
    def test_normal_mixture_nd(self):
        nd, ncomp = 3, 5

        with Model() as model0:
            mus = Normal('mus', shape=(nd, ncomp))
            taus = Gamma('taus', alpha=1, beta=1, shape=(nd, ncomp))
            ws = Dirichlet('ws', np.ones(ncomp))
            mixture0 = NormalMixture('m', w=ws, mu=mus, tau=taus, shape=nd)

        with Model() as model1:
            mus = Normal('mus', shape=(nd, ncomp))
            taus = Gamma('taus', alpha=1, beta=1, shape=(nd, ncomp))
            ws = Dirichlet('ws', np.ones(ncomp))
            comp_dist = [
                Normal.dist(mu=mus[:, i], tau=taus[:, i]) for i in range(ncomp)
            ]
            mixture1 = Mixture('m', w=ws, comp_dists=comp_dist, shape=nd)

        testpoint = model0.test_point
        testpoint['mus'] = np.random.randn(nd, ncomp)
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
Example #10
def build_model():
    y = shared(np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32))
    with Model() as arma_model:
        sigma = HalfCauchy('sigma', 5)
        theta = Normal('theta', 0, sd=2)
        phi = Normal('phi', 0, sd=2)
        mu = Normal('mu', 0, sd=10)

        err0 = y[0] - (mu + phi * mu)

        def calc_next(last_y, this_y, err, mu, phi, theta):
            nu_t = mu + phi * last_y + theta * err
            return this_y - nu_t

        err, _ = scan(fn=calc_next,
                      sequences=dict(input=y, taps=[-1, 0]),
                      outputs_info=[err0],
                      non_sequences=[mu, phi, theta])

        Potential('like', Normal.dist(0, sd=sigma).logp(err))
        mu, sds, elbo = variational.advi(n=2000)
    return arma_model
Example #11
    def test_mixture_of_mixture(self):
        if theano.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7
        nbr = 4
        with Model() as model:
            # mixtures components
            g_comp = Normal.dist(mu=Exponential("mu_g",
                                                lam=1.0,
                                                shape=nbr,
                                                transform=None),
                                 sigma=1,
                                 shape=nbr)
            l_comp = Lognormal.dist(mu=Exponential("mu_l",
                                                   lam=1.0,
                                                   shape=nbr,
                                                   transform=None),
                                    sigma=1,
                                    shape=nbr)
            # weight vector for the mixtures
            g_w = Dirichlet("g_w",
                            a=floatX(np.ones(nbr) * 0.0000001),
                            transform=None,
                            shape=(nbr, ))
            l_w = Dirichlet("l_w",
                            a=floatX(np.ones(nbr) * 0.0000001),
                            transform=None,
                            shape=(nbr, ))
            # mixture components
            g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
            l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
            # mixture of mixtures
            mix_w = Dirichlet("mix_w",
                              a=floatX(np.ones(2)),
                              transform=None,
                              shape=(2, ))
            mix = Mixture("mix",
                          w=mix_w,
                          comp_dists=[g_mix, l_mix],
                          observed=np.exp(self.norm_x))

        test_point = model.test_point

        def mixmixlogp(value, point):
            floatX = theano.config.floatX
            priorlogp = (
                st.dirichlet.logpdf(
                    x=point["g_w"],
                    alpha=np.ones(nbr) * 0.0000001,
                ).astype(floatX)
                + st.expon.logpdf(x=point["mu_g"]).sum(dtype=floatX)
                + st.dirichlet.logpdf(
                    x=point["l_w"],
                    alpha=np.ones(nbr) * 0.0000001,
                ).astype(floatX)
                + st.expon.logpdf(x=point["mu_l"]).sum(dtype=floatX)
                + st.dirichlet.logpdf(
                    x=point["mix_w"],
                    alpha=np.ones(2),
                ).astype(floatX)
            )
            complogp1 = st.norm.logpdf(x=value,
                                       loc=point["mu_g"]).astype(floatX)
            mixlogp1 = logsumexp(np.log(point["g_w"]).astype(floatX) +
                                 complogp1,
                                 axis=-1,
                                 keepdims=True)
            complogp2 = st.lognorm.logpdf(value, 1.0, 0.0,
                                          np.exp(point["mu_l"])).astype(floatX)
            mixlogp2 = logsumexp(np.log(point["l_w"]).astype(floatX) +
                                 complogp2,
                                 axis=-1,
                                 keepdims=True)
            complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
            mixmixlogpg = logsumexp(np.log(point["mix_w"]).astype(floatX) +
                                    complogp_mix,
                                    axis=-1,
                                    keepdims=False)
            return priorlogp, mixmixlogpg

        value = np.exp(self.norm_x)[:, None]
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)

        # check logp of mixture
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)

        # check model logp
        assert_allclose(priorlogp + mixmixlogpg.sum(),
                        model.logp(test_point),
                        rtol=rtol)

        # check input and check logp again
        test_point["g_w"] = np.asarray([0.1, 0.1, 0.2, 0.6])
        test_point["mu_g"] = np.exp(np.random.randn(nbr))
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
        assert_allclose(priorlogp + mixmixlogpg.sum(),
                        model.logp(test_point),
                        rtol=rtol)
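
Stripped of the test scaffolding, the mixture-of-mixtures construction itself is short. A sketch with illustrative weights and data (PyMC3 3.x assumed; `data` is made up):

import numpy as np
from pymc3 import Model, Normal, Lognormal, Mixture

data = np.random.lognormal(size=100)  # hypothetical positive observations

with Model():
    # inner mixtures, built with .dist so they can serve as components
    g_mix = Mixture.dist(w=np.r_[0.5, 0.5],
                         comp_dists=Normal.dist(mu=np.r_[0.0, 1.0],
                                                sigma=1.0, shape=2))
    l_mix = Mixture.dist(w=np.r_[0.5, 0.5],
                         comp_dists=Lognormal.dist(mu=np.r_[0.0, 1.0],
                                                   sigma=1.0, shape=2))
    # outer mixture over the two inner mixtures
    Mixture("mix", w=np.r_[0.5, 0.5], comp_dists=[g_mix, l_mix], observed=data)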
Example #12
    def test_normal_mixture_nd(self, nd, ncomp):
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp, )
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(w=np.ones(ncomp) / ncomp,
                                                mu=test_mus,
                                                sd=1 / np.sqrt(test_taus),
                                                size=10)

        with Model() as model0:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp, ))
            mixture0 = NormalMixture("m",
                                     w=ws,
                                     mu=mus,
                                     tau=taus,
                                     shape=nd,
                                     comp_shape=comp_shape)
            obs0 = NormalMixture("obs",
                                 w=ws,
                                 mu=mus,
                                 tau=taus,
                                 shape=nd,
                                 comp_shape=comp_shape,
                                 observed=observed)

        with Model() as model1:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp, ))
            comp_dist = [
                Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd)
                for i in range(ncomp)
            ]
            mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture("obs",
                           w=ws,
                           comp_dists=comp_dist,
                           shape=nd,
                           observed=observed)

        with Model() as model2:
            # This is expected to fail when comp_shape is not provided,
            # nd is multidimensional, and nd does not broadcast with ncomp.
            # If by chance it does broadcast, an error is still raised once
            # the mixture is given observed data. Furthermore, the Mixture
            # will also raise errors when the observed data is
            # multidimensional but does not broadcast with comp_dists.
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp, ))
            if len(nd) > 1:
                if nd[-1] != ncomp:
                    with pytest.raises(ValueError):
                        NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
                    mixture2 = None
                else:
                    mixture2 = NormalMixture("m",
                                             w=ws,
                                             mu=mus,
                                             tau=taus,
                                             shape=nd)
            else:
                mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            observed_fails = False
            if len(nd) >= 1 and nd != (1, ):
                try:
                    np.broadcast(np.empty(comp_shape), observed)
                except Exception:
                    observed_fails = True
            if observed_fails:
                with pytest.raises(ValueError):
                    NormalMixture("obs",
                                  w=ws,
                                  mu=mus,
                                  tau=taus,
                                  shape=nd,
                                  observed=observed)
                obs2 = None
            else:
                obs2 = NormalMixture("obs",
                                     w=ws,
                                     mu=mus,
                                     tau=taus,
                                     shape=nd,
                                     observed=observed)

        testpoint = model0.test_point
        testpoint["mus"] = test_mus
        testpoint["taus"] = test_taus
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
        assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
        if mixture2 is not None and obs2 is not None:
            assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
        if mixture2 is not None:
            assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
        if obs2 is not None:
            assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
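
The comp_shape mechanics this test exercises, isolated into a minimal sketch (the dimensions are illustrative; PyMC3 3.x assumed): an nd-dimensional mixture of ncomp components takes component parameters of shape nd + (ncomp,).

import numpy as np
from pymc3 import Model, Normal, Gamma, Dirichlet, NormalMixture

nd, ncomp = 3, 5  # illustrative dimensions

with Model():
    mus = Normal("mus", 0.0, 1.0, shape=(nd, ncomp))
    taus = Gamma("taus", alpha=1, beta=1, shape=(nd, ncomp))
    ws = Dirichlet("ws", np.ones(ncomp))
    # comp_shape tells the mixture how the component axis is laid out
    NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=(nd, ncomp))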
Example #13
    def test_normal_mixture_nd(self, nd, ncomp):
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp,)
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(w=np.ones(ncomp)/ncomp,
                                                mu=test_mus,
                                                sd=1/np.sqrt(test_taus),
                                                size=10)

        with Model() as model0:
            mus = Normal('mus', shape=comp_shape)
            taus = Gamma('taus', alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet('ws', np.ones(ncomp))
            mixture0 = NormalMixture('m', w=ws, mu=mus, tau=taus, shape=nd,
                                     comp_shape=comp_shape)
            obs0 = NormalMixture('obs', w=ws, mu=mus, tau=taus, shape=nd,
                                 comp_shape=comp_shape,
                                 observed=observed)

        with Model() as model1:
            mus = Normal('mus', shape=comp_shape)
            taus = Gamma('taus', alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet('ws', np.ones(ncomp))
            comp_dist = [Normal.dist(mu=mus[..., i], tau=taus[..., i],
                                     shape=nd)
                         for i in range(ncomp)]
            mixture1 = Mixture('m', w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture('obs', w=ws, comp_dists=comp_dist, shape=nd,
                           observed=observed)

        with Model() as model2:
            # This is expected to fail when comp_shape is not provided,
            # nd is multidimensional, and nd does not broadcast with ncomp.
            # If by chance it does broadcast, an error is still raised once
            # the mixture is given observed data. Furthermore, the Mixture
            # will also raise errors when the observed data is
            # multidimensional but does not broadcast with comp_dists.
            mus = Normal('mus', shape=comp_shape)
            taus = Gamma('taus', alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet('ws', np.ones(ncomp))
            if len(nd) > 1:
                if nd[-1] != ncomp:
                    with pytest.raises(ValueError):
                        NormalMixture('m', w=ws, mu=mus, tau=taus,
                                      shape=nd)
                    mixture2 = None
                else:
                    mixture2 = NormalMixture('m', w=ws, mu=mus, tau=taus,
                                             shape=nd)
            else:
                mixture2 = NormalMixture('m', w=ws, mu=mus, tau=taus,
                                         shape=nd)
            observed_fails = False
            if len(nd) >= 1 and nd != (1,):
                try:
                    np.broadcast(np.empty(comp_shape), observed)
                except Exception:
                    observed_fails = True
            if observed_fails:
                with pytest.raises(ValueError):
                    NormalMixture('obs', w=ws, mu=mus, tau=taus,
                                  shape=nd,
                                  observed=observed)
                obs2 = None
            else:
                obs2 = NormalMixture('obs', w=ws, mu=mus, tau=taus,
                                     shape=nd,
                                     observed=observed)

        testpoint = model0.test_point
        testpoint['mus'] = test_mus
        testpoint['taus'] = test_taus
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
        assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
        if mixture2 is not None and obs2 is not None:
            assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
        if mixture2 is not None:
            assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
        if obs2 is not None:
            assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
Example #14
    def test_mixture_of_mixture(self):
        if theano.config.floatX == 'float32':
            rtol = 1e-4
        else:
            rtol = 1e-7
        nbr = 4
        with Model() as model:
            # mixtures components
            g_comp = Normal.dist(
                mu=Exponential('mu_g', lam=1.0, shape=nbr, transform=None),
                sigma=1,
                shape=nbr)
            l_comp = Lognormal.dist(
                mu=Exponential('mu_l', lam=1.0, shape=nbr, transform=None),
                sigma=1,
                shape=nbr)
            # weight vector for the mixtures
            g_w = Dirichlet('g_w', a=floatX(np.ones(nbr)*0.0000001), transform=None)
            l_w = Dirichlet('l_w', a=floatX(np.ones(nbr)*0.0000001), transform=None)
            # mixture components
            g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
            l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
            # mixture of mixtures
            mix_w = Dirichlet('mix_w', a=floatX(np.ones(2)), transform=None)
            mix = Mixture('mix', w=mix_w,
                          comp_dists=[g_mix, l_mix],
                          observed=np.exp(self.norm_x))

        test_point = model.test_point

        def mixmixlogp(value, point):
            floatX = theano.config.floatX
            priorlogp = (
                st.dirichlet.logpdf(x=point['g_w'],
                                    alpha=np.ones(nbr) * 0.0000001).astype(floatX)
                + st.expon.logpdf(x=point['mu_g']).sum(dtype=floatX)
                + st.dirichlet.logpdf(x=point['l_w'],
                                      alpha=np.ones(nbr) * 0.0000001).astype(floatX)
                + st.expon.logpdf(x=point['mu_l']).sum(dtype=floatX)
                + st.dirichlet.logpdf(x=point['mix_w'],
                                      alpha=np.ones(2)).astype(floatX)
            )
            complogp1 = st.norm.logpdf(x=value,
                                       loc=point['mu_g']).astype(floatX)
            mixlogp1 = logsumexp(np.log(point['g_w']).astype(floatX) +
                                 complogp1,
                                 axis=-1, keepdims=True)
            complogp2 = st.lognorm.logpdf(value,
                                          1.,
                                          0.,
                                          np.exp(point['mu_l'])).astype(floatX)
            mixlogp2 = logsumexp(np.log(point['l_w']).astype(floatX) +
                                 complogp2,
                                 axis=-1, keepdims=True)
            complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
            mixmixlogpg = logsumexp(np.log(point['mix_w']).astype(floatX) +
                                    complogp_mix,
                                    axis=-1, keepdims=True)
            return priorlogp, mixmixlogpg

        value = np.exp(self.norm_x)[:, None]
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)

        # check logp of mixture
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point),
                        rtol=rtol)

        # check model logp
        assert_allclose(priorlogp + mixmixlogpg.sum(),
                        model.logp(test_point),
                        rtol=rtol)

        # check input and check logp again
        test_point['g_w'] = np.asarray([.1, .1, .2, .6])
        test_point['mu_g'] = np.exp(np.random.randn(nbr))
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point),
                        rtol=rtol)
        assert_allclose(priorlogp + mixmixlogpg.sum(),
                        model.logp(test_point),
                        rtol=rtol)
Example #15
    theta = Normal('theta', 0, sd=2)
    phi = Normal('phi', 0, sd=2)
    mu = Normal('mu', 0, sd=10)

    err0 = y[0] - (mu + phi*mu)

    def calc_next(last_y, this_y, err, mu, phi, theta):
        nu_t = mu + phi*last_y + theta*err
        return this_y - nu_t

    err, _ = scan(fn=calc_next,
                  sequences=dict(input=y, taps=[-1,0]),
                  outputs_info=[err0],
                  non_sequences=[mu, phi, theta])

    like = Potential('like', Normal.dist(0, sd=sigma).logp(err))

with arma_model:
    mu, sds, elbo = variational.advi(n=2000)


def run(n=1000):
    if n == "short":
        n = 50
    with arma_model:
        trace = sample(n)

    # use an integer burn-in length so the trace can be sliced
    burn = n // 10

    traceplot(trace[burn:])
Example #16
    def test_mixture_of_mixture(self):
        nbr = 4
        with Model() as model:
            # mixtures components
            g_comp = Normal.dist(mu=Exponential('mu_g',
                                                lam=1.0,
                                                shape=nbr,
                                                transform=None),
                                 sigma=1,
                                 shape=nbr)
            l_comp = Lognormal.dist(mu=Exponential('mu_l',
                                                   lam=1.0,
                                                   shape=nbr,
                                                   transform=None),
                                    sigma=1,
                                    shape=nbr)
            # weight vector for the mixtures
            g_w = Dirichlet('g_w',
                            a=floatX(np.ones(nbr) * 0.0000001),
                            transform=None)
            l_w = Dirichlet('l_w',
                            a=floatX(np.ones(nbr) * 0.0000001),
                            transform=None)
            # mixture components
            g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
            l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
            # mixture of mixtures
            mix_w = Dirichlet('mix_w', a=floatX(np.ones(2)), transform=None)
            mix = Mixture('mix',
                          w=mix_w,
                          comp_dists=[g_mix, l_mix],
                          observed=np.exp(self.norm_x))

        test_point = model.test_point

        def mixmixlogp(value, point):
            priorlogp = (
                st.dirichlet.logpdf(x=point['g_w'],
                                    alpha=np.ones(nbr) * 0.0000001)
                + st.expon.logpdf(x=point['mu_g']).sum()
                + st.dirichlet.logpdf(x=point['l_w'],
                                      alpha=np.ones(nbr) * 0.0000001)
                + st.expon.logpdf(x=point['mu_l']).sum()
                + st.dirichlet.logpdf(x=point['mix_w'],
                                      alpha=np.ones(2))
            )
            complogp1 = st.norm.logpdf(x=value, loc=point['mu_g'])
            mixlogp1 = logsumexp(np.log(point['g_w']) + complogp1,
                                 axis=-1,
                                 keepdims=True)
            complogp2 = st.lognorm.logpdf(value, 1., 0., np.exp(point['mu_l']))
            mixlogp2 = logsumexp(np.log(point['l_w']) + complogp2,
                                 axis=-1,
                                 keepdims=True)
            complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
            mixmixlogpg = logsumexp(np.log(point['mix_w']) + complogp_mix,
                                    axis=-1,
                                    keepdims=True)
            return priorlogp, mixmixlogpg

        value = np.exp(self.norm_x)[:, None]
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)

        # check logp of mixture
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point))

        # check model logp
        assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point))

        # check input and check logp again
        test_point['g_w'] = np.asarray([.1, .1, .2, .6])
        test_point['mu_g'] = np.exp(np.random.randn(nbr))
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point))
        assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point))
Example #17
with beta_carotene_model:
    samples = fit(random_seed=None).sample(1000)

## Plot the posterior

from pymc3 import plot_posterior

plot_posterior(samples, varnames=['mean'], ref_val=240, color="LightSeaGreen")

## What is the probability that a randomly chosen person will have a beta
## carotene level higher than 130?

mus = samples["mean"]
sigmas = samples["sigma"]

random_samples = Normal.dist(mus, sigmas).random()

sns.distplot(random_samples, label='simulated')
sns.distplot(beat_carotene_levels_before_drug, label="observed")
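
## A sketch answering the question above empirically from the simulated
## draws (the 130 threshold comes from the comment):
prob_above_130 = (random_samples > 130).mean()
print(prob_above_130)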

### Doing the same thing for the measurements after taking the drug

with Model() as beta_carotene_model_after_drug:
    mean = Uniform('mean', lower=0, upper=10000)
    sigma = Uniform('sigma', 0, 100)

## Getting the likelihood; we assume it can be modeled as normal

with beta_carotene_model_after_drug:
    print(beat_carotene_levels_after_drug)
    y = Normal('beat_carotene_levels_after_drug',