Example #1
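PyMC's TestValueGradFunction unit tests, exercising ValueGradFunction: the compiled function returned by Model.logp_dlogp_function that evaluates a cost and its gradient from a single raveled input array.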
# Imports as used by the PyMC v4-era test suite; exact module paths can
# differ between PyMC versions.
import unittest

from functools import reduce

import aesara.tensor as at
import numpy as np
import numpy.testing as npt
import pytest

import pymc as pm

from pymc.blocking import DictToArrayBijection, RaveledVars
from pymc.distributions import logpt_sum
from pymc.model import Point, ValueGradFunction


class TestValueGradFunction(unittest.TestCase):
    def test_no_extra(self):
        a = at.vector("a")
        a.tag.test_value = np.zeros(3, dtype=a.dtype)
        f_grad = ValueGradFunction([a.sum()], [a], {}, mode="FAST_COMPILE")
        assert f_grad._extra_vars == []

    def test_invalid_type(self):
        a = at.ivector("a")
        a.tag.test_value = np.zeros(3, dtype=a.dtype)
        a.dshape = (3, )
        a.dsize = 3
        with pytest.raises(TypeError) as err:
            ValueGradFunction([a.sum()], [a], {}, mode="FAST_COMPILE")
        err.match("Invalid dtype")

    def setUp(self):
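        # Fixture: a cost over two differentiated inputs (val1, val2) plus one
        # "extra" scalar input (extra1) that the compiled function holds fixed.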
        extra1 = at.iscalar("extra1")
        extra1_ = np.array(0, dtype=extra1.dtype)
        extra1.dshape = tuple()
        extra1.dsize = 1

        val1 = at.vector("val1")
        val1_ = np.zeros(3, dtype=val1.dtype)
        val1.dshape = (3, )
        val1.dsize = 3

        val2 = at.matrix("val2")
        val2_ = np.zeros((2, 3), dtype=val2.dtype)
        val2.dshape = (2, 3)
        val2.dsize = 6

        self.val1, self.val1_ = val1, val1_
        self.val2, self.val2_ = val2, val2_
        self.extra1, self.extra1_ = extra1, extra1_

        self.cost = extra1 * val1.sum() + val2.sum()

        self.f_grad = ValueGradFunction([self.cost], [val1, val2],
                                        {extra1: extra1_},
                                        mode="FAST_COMPILE")

    def test_extra_not_set(self):
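        # Both querying and evaluating must fail before set_extra_values().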
        with pytest.raises(ValueError) as err:
            self.f_grad.get_extra_values()
        err.match("Extra values are not set")

        with pytest.raises(ValueError) as err:
            size = self.val1_.size + self.val2_.size
            self.f_grad(np.zeros(size, dtype=self.f_grad.dtype))
        err.match("Extra values are not set")

    def test_grad(self):
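        # With extra1 = 5 and an all-ones input array, the cost is
        # 5 * sum(val1) + sum(val2) = 5 * 3 + 6 = 21, and the gradient is 5
        # for each entry of val1 and 1 for each entry of val2.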
        self.f_grad.set_extra_values({"extra1": 5})
        size = self.val1_.size + self.val2_.size
        array = RaveledVars(
            np.ones(size, dtype=self.f_grad.dtype),
            (
                ("val1", self.val1_.shape, self.val1_.dtype),
                ("val2", self.val2_.shape, self.val2_.dtype),
            ),
        )
        val, grad = self.f_grad(array)
        assert val == 21
        npt.assert_allclose(grad, [5, 5, 5, 1, 1, 1, 1, 1, 1])

    @pytest.mark.xfail(reason="Test not refactored for v4")
    def test_edge_case(self):
        # Edge case discovered in #2948
        ndim = 3
        with pm.Model() as m:
            pm.LogNormal("sigma",
                         mu=np.zeros(ndim),
                         tau=np.ones(ndim),
                         shape=ndim)  # variance for the correlation matrix
            pm.HalfCauchy("nu", beta=10)
            step = pm.NUTS()

        func = step._logp_dlogp_func
        func.set_extra_values(m.initial_point)
        q = func.dict_to_array(m.initial_point)
        logp, dlogp = func(q)
        assert logp.size == 1
        assert dlogp.size == 4
        npt.assert_allclose(dlogp, 0.0, atol=1e-5)

    def test_missing_data(self):
        # Originally from a case described in #3122
        X = np.random.binomial(1, 0.5, 10)
        X[0] = -1  # masked a single value
        X = np.ma.masked_values(X, value=-1)
        with pm.Model() as m:
            x1 = pm.Uniform("x1", 0.0, 1.0)
            x2 = pm.Bernoulli("x2", x1, observed=X)

        gf = m.logp_dlogp_function()
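        # Mark extras as set by hand; the imputed "x2_missing" variable is an
        # extra var backed by a shared variable that already holds a usable value.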
        gf._extra_are_set = True

        assert m["x2_missing"].type == gf._extra_vars_shared["x2_missing"].type

        pnt = m.test_point.copy()
        del pnt["x2_missing"]

        res = [gf(DictToArrayBijection.map(Point(pnt, model=m))) for _ in range(5)]

        assert reduce(lambda x, y: np.array_equal(x, y) and y, res) is not False

    def test_aesara_switch_broadcast_edge_cases_1(self):
        # Tests against two subtle issues related to a previous bug in Theano
        # where `tt.switch` would not always broadcast tensors with single
        # values: https://github.com/pymc-devs/aesara/issues/270

        # Known issue 1: https://github.com/pymc-devs/pymc/issues/4389
        data = pm.floatX(np.zeros(10))
        with pm.Model() as m:
            p = pm.Beta("p", 1, 1)
            obs = pm.Bernoulli("obs", p=p, observed=data)

        npt.assert_allclose(
            logpt_sum(obs).eval({p.tag.value_var: pm.floatX(np.array(0.0))}),
            np.log(0.5) * 10,
        )

    def test_aesara_switch_broadcast_edge_cases_2(self):
        # Known issue 2: https://github.com/pymc-devs/pymc/issues/4417
        # fmt: off
        data = np.array([
            1.35202174,
            -0.83690274,
            1.11175166,
            1.29000367,
            0.21282749,
            0.84430966,
            0.24841369,
            0.81803141,
            0.20550244,
            -0.45016253,
        ])
        # fmt: on
        with pm.Model() as m:
            mu = pm.Normal("mu", 0, 5)
            obs = pm.TruncatedNormal("obs",
                                     mu=mu,
                                     sigma=1,
                                     lower=-1,
                                     upper=2,
                                     observed=data)

        npt.assert_allclose(
            m.dlogp([m.rvs_to_values[mu]])({"mu": 0}),
            2.499424682024436,
            rtol=1e-5,
        )