Example #1
def incomplete_beta_ps(a, b, value):
    """Power series for incomplete beta
    Use when b*x is small and value not too close to 1.
    Based on Cephes library by Steve Moshier (incbet.c)
    """
    one = aet.constant(1, dtype="float64")
    ai = one / a
    u = (one - b) * value
    t1 = u / (a + one)
    t = u
    threshold = np.MachAr().eps * ai
    s = aet.constant(0, dtype="float64")

    def _step(i, t, s):
        t *= (i - b) * value / i
        step = t / (a + i)
        s += step
        return ((t, s), until(aet.abs_(step) < threshold))

    (t, s), _ = scan(_step,
                     sequences=[aet.arange(2, 302)],
                     outputs_info=[e for e in aet.cast((t, s), "float64")])

    s = s[-1] + t1 + ai

    t = gammaln(a + b) - gammaln(a) - gammaln(b) + a * aet.log(value) + aet.log(s)
    return aet.exp(t)
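
A minimal usage sketch of the helper above, assuming the module-level imports it relies on (aet as aesara.tensor, scan, until, gammaln) are in scope; the inputs are illustrative only:

# Hypothetical usage: evaluate the series where b*value is small and value is far from 1.
a, b, value = 2.5, 0.5, 0.1
series = incomplete_beta_ps(a, b, value)   # symbolic float64 scalar
print(series.eval())                       # should be close to scipy.special.betainc(a, b, value)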
Example #2
    def test_constant_output(self):
        # Test that if the output is a constant, we respect the Aesara memory interface
        f = function([], aet.constant([4]))
        # print f.maker.fgraph.toposort()
        out = f()
        assert (out == 4).all()
        out[0] = 3
        out2 = f()
        # If the following 2 asserts fail, it means Aesara broke its memory contract.
        assert out2 is not out
        assert (out2 == 4).all()

        # Test that if the output is a constant and borrow=True, we respect the Aesara memory interface
        f = function([], Out(aet.constant([4]), borrow=True))
        # print f.maker.fgraph.toposort()
        out = f()
        assert (out == 4).all()
        out[0] = 3
        out2 = f()

        if isinstance(get_default_mode(), DebugMode):
            # In DebugMode, we don't implement optimization based on borrow on the output.
            assert (out2 == 4).all()
        else:
            assert out2 is out
            assert (out2 == 3).all()
Example #3
def test_draw_value():
    npt.assert_equal(_draw_value(np.array([5, 6])), [5, 6])
    npt.assert_equal(_draw_value(np.array(5.0)), 5)

    npt.assert_equal(_draw_value(aet.constant([5.0, 6.0])), [5, 6])
    assert _draw_value(aet.constant(5)) == 5
    npt.assert_equal(_draw_value(2 * aet.constant([5.0, 6.0])), [10, 12])

    val = aesara.shared(np.array([5.0, 6.0]))
    npt.assert_equal(_draw_value(val), [5, 6])
    npt.assert_equal(_draw_value(2 * val), [10, 12])

    a = aet.scalar("a")
    a.tag.test_value = 6
    npt.assert_equal(_draw_value(2 * a, givens=[(a, 1)]), 2)

    assert _draw_value(5) == 5
    assert _draw_value(5.0) == 5
    assert isinstance(_draw_value(5.0), type(5.0))
    assert isinstance(_draw_value(5), type(5))

    with pm.Model():
        mu = 2 * aet.constant(np.array([5.0, 6.0])) + aesara.shared(
            np.array(5))
        a = pm.Normal("a", mu=mu, sigma=5, shape=2)

    val1 = _draw_value(a)
    val2 = _draw_value(a)
    assert np.all(val1 != val2)

    with pytest.raises(ValueError) as err:
        _draw_value([])
    err.match("Unexpected type")
Example #4
    def test_observed_with_column_vector(self):
        """This test is related to https://github.com/pymc-devs/aesara/issues/390 which breaks
        broadcastability of column-vector RVs. This unexpected change in type can lead to
        incompatibilities during graph rewriting for model.logp evaluation.
        """
        with pm.Model() as model:
            # The `observed` is a broadcastable column vector
            obs = at.as_tensor_variable(
                np.ones((3, 1), dtype=aesara.config.floatX))
            assert obs.broadcastable == (False, True)

            # Both shapes describe broadcastable column vectors
            size64 = at.constant([3, 1], dtype="int64")
            # But the second shape is upcasted from an int32 vector
            cast64 = at.cast(at.constant([3, 1], dtype="int32"), dtype="int64")

            pm.Normal("size64", mu=0, sigma=1, size=size64, observed=obs)
            pm.Normal("shape64", mu=0, sigma=1, shape=size64, observed=obs)
            model.logp()

            pm.Normal("size_cast64", mu=0, sigma=1, size=cast64, observed=obs)
            pm.Normal("shape_cast64",
                      mu=0,
                      sigma=1,
                      shape=cast64,
                      observed=obs)
            model.logp()
Example #5
    def rv_op(cls, dist, lower=None, upper=None, size=None, rngs=None):

        lower = at.constant(-np.inf) if lower is None else at.as_tensor_variable(lower)
        upper = at.constant(np.inf) if upper is None else at.as_tensor_variable(upper)

        # When size is not specified, dist may have to be broadcasted according to lower/upper
        dist_shape = size if size is not None else at.broadcast_shape(dist, lower, upper)
        dist = change_rv_size(dist, dist_shape)

        # Censoring is achieved by clipping the base distribution between lower and upper
        rv_out = at.clip(dist, lower, upper)

        # Reference nodes to facilitate identification in other classmethods, without
        # worrying about possible dimshuffles
        rv_out.tag.dist = dist
        rv_out.tag.lower = lower
        rv_out.tag.upper = upper

        if rngs is not None:
            rv_out = cls._change_rngs(rv_out, rngs)

        return rv_out
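
As the comment above notes, censoring amounts to clipping draws of the base distribution; an illustrative NumPy-only sketch of that idea (not the PyMC API itself) is:

# Illustrative only: censoring by clipping, mirroring the at.clip call above.
import numpy as np

draws = np.random.default_rng(0).normal(size=1000)
censored = np.clip(draws, -1.0, 1.0)   # probability mass piles up at the bounds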
Example #6
def test_interval_transform_raises():
    with pytest.raises(ValueError, match="Lower and upper interval bounds cannot both be None"):
        tr.Interval(None, None)

    with pytest.raises(ValueError, match="Interval bounds must be constant values"):
        tr.Interval(at.constant(5) + 1, None)

    assert tr.Interval(at.constant(5), None)
Example #7
 def __init__(self, name="", model=None):
     super().__init__(name, model)
     assert pm.modelcontext(None) is self
     # 1) init variables with the register_rv method
     self.register_rv(pm.Normal.dist(), "v1")
     self.v2 = pm.Normal("v2", mu=0, sigma=1)
     # 2) Potentials and Deterministic variables are registered with methods too;
     # make sure that their names will not overlap with those of other models
     pm.Deterministic("d", at.constant(1))
     pm.Potential("p", at.constant(1))
Example #8
    def test_dtype_normal_uniform_687(self):
        # Regression test for #687.
        rng_R = random_state_type()
        assert (uniform(rng_R,
                        low=tensor.constant(0, dtype="float64"),
                        dtype="float32")[1].dtype == "float32")

        assert (normal(rng_R,
                       avg=tensor.constant(0, dtype="float64"),
                       dtype="float32")[1].dtype == "float32")
Example #9
    def test_constant(self):
        # Get counter value
        autoname_id = next(Variable.__count__)
        Variable.__count__ = count(autoname_id)
        r1 = at.constant(1.5)
        assert r1.auto_name == "auto_" + str(autoname_id), (
            r1.auto_name,
            "auto_" + str(autoname_id),
        )

        r3 = at.constant(1.6)
        assert r3.auto_name == "auto_" + str(autoname_id + 1)
Example #10
    def test_vals(self):
        npt.assert_equal(draw_values([np.array([5, 6])])[0], [5, 6])
        npt.assert_equal(draw_values([np.array(5.0)])[0], 5)

        npt.assert_equal(draw_values([aet.constant([5.0, 6.0])])[0], [5, 6])
        assert draw_values([aet.constant(5)])[0] == 5
        npt.assert_equal(
            draw_values([2 * aet.constant([5.0, 6.0])])[0], [10, 12])

        val = aesara.shared(np.array([5.0, 6.0]))
        npt.assert_equal(draw_values([val])[0], [5, 6])
        npt.assert_equal(draw_values([2 * val])[0], [10, 12])
Example #11
def mv_simple():
    mu = floatX_array([-0.1, 0.5, 1.1])
    p = floatX_array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]])
    tau = np.dot(p, p.T)
    with pm.Model() as model:
        pm.MvNormal(
            "x",
            at.constant(mu),
            tau=at.constant(tau),
            initval=floatX_array([0.1, 1.0, 0.8]),
        )
    H = tau
    C = np.linalg.inv(H)
    return model.initial_point, model, (mu, C)
Example #12
    def test_constant(self):
        x = tt.constant(np.random.rand(2, 3), dtype=config.floatX)
        y = aesara.shared(np.random.rand(3, 6).astype(config.floatX), "y")

        # should work
        z = tt.dot(x, y)
        assert hasattr(z.tag, "test_value")
        f = aesara.function([], z)
        assert _allclose(f(), z.tag.test_value)

        # this test should fail
        x = tt.constant(np.random.rand(2, 4), dtype=config.floatX)
        with pytest.raises(ValueError):
            tt.dot(x, y)
Example #13
File: models.py Project: YRApril/LiJia
def mv_simple_very_coarse():
    mu = floatX_array([-0.3, 0.7, 1.3])
    p = floatX_array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]])
    tau = np.dot(p, p.T)
    with pm.Model() as model:
        pm.MvNormal(
            "x",
            at.constant(mu),
            tau=at.constant(tau),
            shape=3,
            testval=floatX_array([0.1, 1.0, 0.8]),
        )
    H = tau
    C = np.linalg.inv(H)
    return model.test_point, model, (mu, C)
Example #14
    def test_vector_components(self):
        nd = 3
        npop = 4
        # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
        mus = at.constant(np.full((nd, npop), np.arange(npop)))

        with Model(rng_seeder=self.get_random_state()) as model:
            m = Mixture(
                "m",
                w=np.ones(npop) / npop,
                # An MvNormal with diagonal covariance sigma**2 * I should be
                # equivalent to the vector of Normals used for latent_m below
                comp_dists=[MvNormal.dist(mus[:, i], np.eye(nd) * 1e-5**2) for i in range(npop)],
            )
            z = Categorical("z", p=np.ones(npop) / npop)
            latent_m = Normal("latent_m", mu=mus[..., z], sigma=1e-5, shape=nd)

        size = 100
        m_val = draw(m, draws=size)
        latent_m_val = draw(latent_m, draws=size)
        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 comes from the same mixture
        # component
        assert np.all(np.diff(m_val) < 1e-3)
        assert np.all(np.diff(latent_m_val) < 1e-3)
        # TODO: The following statistical test appears to be more flaky than expected
        #  even though the  distributions should be the same. Seeding should make it
        #  stable but might be worth investigating further
        self.samples_from_same_distribution(m_val, latent_m_val)

        # Check that mixing of values in the last axis leads to smaller logp
        logp_fn = model.compile_logp(vars=[m])
        assert logp_fn({"m": [0, 0, 0]}) > logp_fn({"m": [0, 1, 0]}) > logp_fn({"m": [0, 1, 2]})
        self.logp_matches(m, latent_m, z, npop, model=model)
Example #15
    def test_scalar_components(self):
        nd = 3
        npop = 4
        # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
        mus = at.constant(np.full((nd, npop), np.arange(npop)))

        with Model(rng_seeder=self.get_random_state()) as model:
            m = NormalMixture(
                "m",
                w=np.ones(npop) / npop,
                mu=mus,
                sigma=1e-5,
                comp_shape=(nd, npop),
                shape=nd,
            )
            z = Categorical("z", p=np.ones(npop) / npop, shape=nd)
            mu = at.as_tensor_variable([mus[i, z[i]] for i in range(nd)])
            latent_m = Normal("latent_m", mu=mu, sigma=1e-5, shape=nd)

        size = 100
        m_val = draw(m, draws=size)
        latent_m_val = draw(latent_m, draws=size)

        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 can come from independent
        # components
        assert not all(np.all(np.diff(m_val) < 1e-3, axis=-1))
        assert not all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))
        self.samples_from_same_distribution(m_val, latent_m_val)

        # Check that logp is the same whether elements of the last axis are mixed or not
        logp_fn = model.compile_logp(vars=[m])
        assert np.isclose(logp_fn({"m": [0, 0, 0]}), logp_fn({"m": [0, 1, 2]}))
        self.logp_matches(m, latent_m, z, npop, model=model)
Example #16
 def __init__(self, mean=0, sigma=1, name="", model=None):
     super().__init__(name, model)
     self.Var("v1", Normal.dist(mu=mean, sigma=sigma))
     Normal("v2", mu=mean, sigma=sigma)
     Normal("v3", mu=mean, sigma=HalfCauchy("sd", beta=10, testval=1.0))
     Deterministic("v3_sq", self.v3 ** 2)
     Potential("p1", aet.constant(1))
Example #17
def _replace_shared_variables(
        graph: List[TensorVariable]) -> List[TensorVariable]:
    """Replace shared variables in graph by their constant values

    Raises
    ------
    ValueError
        If any shared variable contains default_updates
    """

    shared_variables = [
        var for var in graph_inputs(graph) if isinstance(var, SharedVariable)
    ]

    if any(hasattr(var, "default_update") for var in shared_variables):
        raise ValueError(
            "Graph contains shared variables with default_update which cannot "
            "be safely replaced.")

    replacements = {
        var: at.constant(var.get_value(borrow=True))
        for var in shared_variables
    }

    new_graph = clone_replace(graph, replace=replacements)
    return new_graph
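
A minimal usage sketch, assuming at is aesara.tensor and the imports used by the function above are available; the variable names here are hypothetical:

import numpy as np
import aesara
import aesara.tensor as at

s = aesara.shared(np.array(2.0), name="s")
x = at.scalar("x")
(y,) = _replace_shared_variables([x * s])
# y multiplies x by a constant holding the current value of s, so later
# updates to s no longer affect the cloned graph.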
Example #18
 def _filter_grad_var(grad, inp):
     # Returns (filtered_var, overrider_var)
     # Args:
     #     grad: gradient Variable
     #     inp: the corresponding input of gradient Variable
     #
     # a grad() call could return instance of NullType() or DisconnectedType()
     # which cannot be directly used in OfG
     #
     # Since we always use an OfG instance as self._lop_op, the current
     # workaround is to "remember" the special cases of the gradient and
     # replace them after self._lop_op is called.
     #
     # This helper function changes invalid types into a filtered_var,
     # and provides a overrider_var to be replaced at grad() call
     #
     # For now, this converts NullType or DisconnectedType into zeros_like.
     # other types are unmodified: overrider_var -> None
     if isinstance(grad.type, (NullType, DisconnectedType)):
         if hasattr(inp, "zeros_like"):
             return inp.zeros_like(), grad
         else:
             return at.constant(0.0), grad
     else:
         return grad, None
Example #19
    def test_composite_elemwise_float16(self):
        w = bvector()
        x = vector(dtype="float16")
        y = fvector()

        cz = tanh(x + aet.cast(y, "float16"))
        o = (
            cz
            - cz ** 2
            + aet.cast(x, "int16")
            + aet.cast(x, "float32")
            + aet.cast(w, "float16")
            - aet.constant(np.float16(1.0))
        )

        aesara.function([w, x, y], o, mode=mode_with_gpu)

        v = vector(dtype="uint8")
        w = vector(dtype="float16")
        x = vector(dtype="float16")
        y = vector(dtype="float16")
        z = vector(dtype="float16")

        o = aet.switch(v, mul(w, x, y), z)
        aesara.function([v, w, x, y, z], o, mode=mode_with_gpu)
Example #20
    def check_vectortransform_elementwise_logp(self, model):
        x = model.free_RVs[0]
        x_val_transf = x.tag.value_var

        pt = model.initial_point(0)
        test_array_transf = floatX(
            np.random.randn(*pt[x_val_transf.name].shape))
        transform = x_val_transf.tag.transform
        test_array_untransf = transform.backward(test_array_transf,
                                                 *x.owner.inputs).eval()

        # Create input variable with same dimensionality as untransformed test_array
        x_val_untransf = at.constant(test_array_untransf).type()

        jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs)
        # Original distribution is univariate
        if x.owner.op.ndim_supp == 0:
            assert joint_logpt(
                x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1)
        # Original distribution is multivariate
        else:
            assert joint_logpt(
                x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim

        a = joint_logpt(x, x_val_transf,
                        jacobian=False).eval({x_val_transf: test_array_transf})
        b = joint_logpt(x, x_val_untransf, transformed=False).eval(
            {x_val_untransf: test_array_untransf})
        # Hack to get relative tolerance
        close_to(a, b, np.abs(0.5 * (a + b) * tol))
Example #21
def choice(random_state,
           size=None,
           a=2,
           replace=True,
           p=None,
           ndim=None,
           dtype="int64"):
    """
    Choose values from `a` with or without replacement. `a` can be a 1-D array
    or a positive scalar. If `a` is a scalar, the samples are drawn from the
    range 0,...,a-1.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, a scalar will be returned.

    """
    a = tensor.as_tensor_variable(a)
    if isinstance(replace, bool):
        replace = tensor.constant(replace, dtype="int8")
    else:
        replace = tensor.as_tensor_variable(replace)
    # encode p=None as an empty vector
    p = tensor.as_tensor_variable(p or [])
    ndim, size, bcast = _infer_ndim_bcast(ndim, size)
    op = RandomFunction(choice_helper,
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, a, replace, p)
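
For intuition, the sampling semantics described in the docstring (a scalar `a` means drawing from range(a)) can be sketched with plain NumPy; this is only an analogue, not the Aesara op itself:

# NumPy analogue of the semantics documented above (illustrative only).
import numpy as np

rng = np.random.default_rng(0)
rng.choice(5, size=3, replace=True)              # scalar a: sample from 0, ..., 4
rng.choice([10, 20, 30], size=2, replace=False)  # 1-D a: sample from the array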
Example #22
    def test_scan(self):
        x = vector("x")

        # we will insert a subgraph involving these variables into the inner
        # graph of scan. since they were not previously in the inner graph,
        # they are like non_sequences to scan(). scan() infers these and
        # imports them into the inner graph properly, and map_variables()
        # should do this as well.
        outer = scalar("outer")
        shared = aesara.shared(np.array(1.0, dtype=aesara.config.floatX),
                               name="shared")
        constant = at.constant(1, name="constant")

        # z will equal 1 so multiplying by it doesn't change any values
        z = outer * (shared + constant)

        def step(x, a):
            r = a + x
            r.tag.replacement = z * (a - x)
            return r

        s, _ = aesara.scan(step, sequences=x, outputs_info=[np.array(0.0)])
        # ensure z is owned by the outer graph so map_variables() will need to
        # jump through additional hoops to placate FunctionGraph.
        t = z * s
        (s2, ) = map_variables(self.replacer, [t])
        t2 = z * s2

        f = aesara.function([x, outer], [t, t2])
        rval = f(x=np.array([1, 2, 3], dtype=np.float32), outer=0.5)
        assert np.array_equal(rval, [[1, 3, 6], [-1, -3, -6]])
Example #23
    def test_sparseblockgemv_grad(self):

        W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

        iIdx = at.constant(iIdx_val)
        oIdx = at.constant(oIdx_val)

        def metaop(b, h, W):
            return sparse_block_dot(W, h, iIdx, b, oIdx)

        def op(b, h, W):
            return self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)

        eps = 3e-3
        utt.verify_grad(metaop, [b_val, h_val, W_val], mode=self.mode, eps=eps)
        utt.verify_grad(op, [b_val, h_val, W_val], mode=self.mode, eps=eps)
Example #24
    def test_opfromgraph(self):
        # as with the scan tests above, insert foreign inputs into the
        # inner graph.
        outer = scalar("outer")
        shared = aesara.shared(np.array(1.0, dtype=aesara.config.floatX),
                               name="shared")
        constant = at.constant(1.0, name="constant")
        z = outer * (shared + constant)

        # construct the inner graph
        a = scalar()
        b = scalar()
        r = a + b
        r.tag.replacement = z * (a - b)

        # construct the outer graph
        c = scalar()
        d = scalar()
        u = aesara.compile.builders.OpFromGraph([a, b], [r])(c, d)
        t = z * u
        (v, ) = map_variables(self.replacer, [t])
        t2 = z * v

        f = aesara.function([c, d, outer], [t, t2])
        for m, n in itertools.combinations(range(10), 2):
            assert f(m, n, outer=0.5) == [m + n, m - n]

        # test that the unsupported case of replacement with a shared
        # variable with updates crashes
        shared.update = shared + 1
        with pytest.raises(NotImplementedError):
            map_variables(self.replacer, [t])
Example #25
 def __init__(self, mean=0, sigma=1, name="", model=None):
     super().__init__(name, model)
     self.register_rv(Normal.dist(mu=mean, sigma=sigma), "v1")
     Normal("v2", mu=mean, sigma=sigma)
     Normal("v3", mu=mean, sigma=Normal("sd", mu=10, sigma=1, initval=1.0))
     Deterministic("v3_sq", self.v3**2)
     Potential("p1", at.constant(1))
Example #26
    def test_reshape(self):
        new_shape = tensor.constant(
            np.asarray([self.mat_in_shape[0] * self.mat_in_shape[1]], dtype="int64")
        )

        self.check_mat_rop_lop(
            self.mx.reshape(new_shape), (self.mat_in_shape[0] * self.mat_in_shape[1],)
        )
Example #27
File: basic.py Project: mgorny/aesara
 def make_node(self, x, index, toInsert):
     assert isinstance(x.type, TypedListType)
     assert x.ttype == toInsert.type
     if not isinstance(index, Variable):
         index = at.constant(index, ndim=0, dtype="int64")
     else:
         assert index.dtype == "int64"
         assert isinstance(index, TensorVariable) and index.ndim == 0
     return Apply(self, [x, index, toInsert], [x.type()])
Example #28
    def test_sparseblockgemv_grad_1(self):
        # Test that we correctly handle cases where dimensions are 1.
        h_val = randn(1, 1, 1).astype("float32")
        iIdx_val = np.random.permutation(1)[:1][None, :]
        oIdx_val = np.random.permutation(1)[:1][None, :]
        W_val = randn(1, 1, 1, 1).astype("float32")
        b_val = randn(1, 1).astype("float32")

        iIdx = at.constant(iIdx_val)
        oIdx = at.constant(oIdx_val)

        def metaop(b, h, W):
            return sparse_block_dot(W, h, iIdx, b, oIdx)

        def op(b, h, W):
            return self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)

        utt.verify_grad(metaop, [b_val, h_val, W_val], mode=self.mode)
        utt.verify_grad(op, [b_val, h_val, W_val], mode=self.mode)
Example #29
 def make_node(self, o, x, y, xIdx, yIdx, alpha=None):
     ctx = infer_context_name(o, x, y)
     one = aet.constant(np.asarray(1.0, dtype="float32"))
     o = as_gpuarray_variable(o, ctx)
     x = as_gpuarray_variable(x, ctx)
     y = as_gpuarray_variable(y, ctx)
     xIdx = as_tensor_variable(xIdx)
     yIdx = as_tensor_variable(yIdx)
     if alpha is None:
         alpha = one
     return Apply(self, [o, x, y, xIdx, yIdx, alpha], [o.type()])
Example #30
def _check_size(size):
    """
    Canonicalise inputs to get valid output sizes for Aesara tensors.

    Parameters
    ----------
    size : int_vector_like
        Some variable that could serve as the shape for an Aesara tensor.
        This can be an int, a tuple of ints, a list of ints
        or an Aesara Variable with similar properties.

    Returns
    -------
    size_var : int_vector
        A one-dimensional Aesara variable encapsulating the given size.

    Raises
    ------
    ValueError
        If this method can not build a valid size from the input.
    """
    # non-tuple checks and scalar-to-tuple transform
    if isinstance(size, Variable):
        if size.ndim == 1:
            return size
        elif size.ndim == 0:
            return at.stack([size], ndim=1)
        else:
            raise ValueError(
                "Aesara variable must have 1 dimension to be a valid size.",
                size)
    elif isinstance(size, (np.integer, int)):
        return at.constant([size], ndim=1)
    elif not isinstance(size, (tuple, list)):
        raise ValueError("Size must be a int, tuple, list or Aesara variable.",
                         size)

    # check entries of list or tuple
    for i in size:
        if isinstance(i, Variable):
            if i.ndim != 0:
                raise ValueError("Non-scalar Aesara variable in size", size, i)
        elif isinstance(i, (np.integer, int)):
            if i <= 0:
                raise ValueError(
                    "Non-positive dimensions not allowed in size.", size, i)
        else:
            raise ValueError(
                "Only Aesara variables and integers are allowed in a size-tuple.",
                size,
                i,
            )

    return at.as_tensor_variable(size, ndim=1)
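
A minimal usage sketch of the checks above; the calls are hypothetical and assume _check_size and at (aesara.tensor) are importable from the surrounding module:

_check_size(5)                # -> 1-d constant vector [5]
_check_size((2, 3))           # -> 1-d integer vector [2, 3]
_check_size(at.iscalar("n"))  # 0-d variable is stacked into a 1-d vector
_check_size((2, -1))          # raises ValueError: non-positive dimension in a tuple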