Esempio n. 1
0
def check_jacobian_det(transform,
                       domain,
                       constructor=at.dscalar,
                       test=0,
                       make_comparable=None,
                       elemwise=False):
    """Compare the transform's analytic log-Jacobian-determinant against
    one derived symbolically from its backward mapping, over *domain*."""
    y = constructor("y")
    y.tag.test_value = test

    backward_x = transform.backward(y)
    if make_comparable:
        # Optionally reshape/trim x so the Jacobian is square.
        backward_x = make_comparable(backward_x)

    # Symbolic log|det J| of the backward transform.
    if elemwise:
        jac = at.log(at.abs_(at.diag(jacobian(backward_x, [y]))))
    else:
        jac = at.log(at.nlinalg.det(jacobian(backward_x, [y])))

    # ljd = log jacobian det
    actual_ljd = aesara.function([y], jac)
    computed_ljd = aesara.function([y],
                                   at.as_tensor_variable(
                                       transform.jacobian_det(y)),
                                   on_unused_input="ignore")

    for yval in domain.vals:
        close_to(actual_ljd(yval), computed_ljd(yval), tol)
Esempio n. 2
0
def test_stickbreaking_accuracy():
    """Round-tripping an extreme value through stick_breaking is accurate."""
    val = np.array([-30])
    x = at.dvector("x")
    x.tag.test_value = val
    # forward(backward(x)) should reproduce x even far in the tail.
    roundtrip = tr.stick_breaking.forward(tr.stick_breaking.backward(x))
    identity_f = aesara.function([x], roundtrip)
    close_to(val, identity_f(val), tol)
Esempio n. 3
0
def check_transform(transform, domain, constructor=at.dscalar, test=0):
    """Check forward/forward_val agreement and round-trip identity."""
    x = constructor("x")
    x.tag.test_value = test
    # Compiled forward pass; compared below against forward_val.
    fwd_fn = aesara.function([x], transform.forward(x))
    # backward(forward(x)) should reproduce x exactly (up to tol).
    ident_fn = aesara.function([x], transform.backward(transform.forward(x)))
    for point in domain.vals:
        close_to(point, ident_fn(point), tol)
        close_to(transform.forward_val(point), fwd_fn(point), tol)
Esempio n. 4
0
def test_stickbreaking_bounds():
    """stick_breaking maps onto the interior of the probability simplex."""
    domain = Vector(R, 2)
    start = np.array([0, 0])
    vals = get_values(tr.stick_breaking, domain, at.dvector, start)

    # Rows sum to one; every coordinate is strictly inside (0, 1).
    close_to(vals.sum(axis=1), 1, tol)
    close_to_logical(vals > 0, True, tol)
    close_to_logical(vals < 1, True, tol)

    # Drop the last (dependent) coordinate so the Jacobian is square.
    check_jacobian_det(tr.stick_breaking, domain, at.dvector, start,
                       lambda v: v[:-1])
Esempio n. 5
0
    def check_transform_elementwise_logp(self, model):
        """Check that the elementwise logp of a transformed RV equals the
        untransformed logp plus the Jacobian-determinant correction."""
        untransformed = model.deterministics[0]
        rv = model.free_RVs[0]
        assert rv.ndim == rv.logp_elemwiset.ndim

        point = model.test_point
        values = np.random.randn(*point[rv.name].shape)
        point[rv.name] = values
        dist = rv.distribution
        # logp of the backward-mapped values, without the Jacobian term.
        logp_nojac = untransformed.distribution.logp(
            dist.transform_used.backward(values))
        jacob_det = dist.transform_used.jacobian_det(aesara.shared(values))
        assert rv.logp_elemwiset.ndim == jacob_det.ndim

        expected = logp_nojac + jacob_det
        close_to(rv.logp_elemwise(point), expected.eval(), tol)
Esempio n. 6
0
    def check_vectortransform_elementwise_logp(self, model, vect_opt=0):
        """Check transformed vs. untransformed logp of a vector-valued RV,
        comparing with a relative tolerance."""
        rv = model.free_RVs[0]
        value_var = rv.tag.value_var
        assert logpt(rv).ndim == (rv.ndim - 1)

        point = model.initial_point
        values = np.random.randn(*point[value_var.name].shape)
        transform = value_var.tag.transform
        logp_nojac = logpt(rv, transform.backward(rv, values),
                           transformed=False)

        jacob_det = transform.jacobian_det(rv, aesara.shared(values))
        assert logpt(rv).ndim == jacob_det.ndim

        # Relative tolerance: scale tol by the mean magnitude of both sides.
        lhs = logpt(rv, values.astype(aesara.config.floatX),
                    jacobian=False).eval()
        rhs = logp_nojac.eval()
        close_to(lhs, rhs, np.abs(0.5 * (lhs + rhs) * tol))
Esempio n. 7
0
def check_transform(transform,
                    domain,
                    constructor=at.dscalar,
                    test=0,
                    rv_var=None):
    """Check that backward(forward(x)) is the identity over *domain*."""
    x = constructor("x")
    x.tag.test_value = test
    rv_var = x if rv_var is None else rv_var
    # FIXME: What's being tested here?  That the transformed graph can compile?
    forward_f = aesara.function([x], transform.forward(rv_var, x))
    # Round trip: backward . forward should be the identity map.
    roundtrip = transform.backward(rv_var, transform.forward(rv_var, x))
    identity_f = aesara.function([x], roundtrip)
    for point in domain.vals:
        close_to(point, identity_f(point), tol)
Esempio n. 8
0
    def check_transform_elementwise_logp(self, model):
        """Check that logp with and without the transform agree once the
        Jacobian term is excluded from both sides."""
        rv = model.free_RVs[0]
        value_var = rv.tag.value_var
        assert logpt(rv).ndim == rv.ndim

        point = model.initial_point
        values = np.random.randn(*point[value_var.name].shape)
        transform = value_var.tag.transform
        # logp of backward-mapped values, bypassing the transform machinery.
        logp_notrans = logpt(rv,
                             transform.backward(rv, values),
                             transformed=False)

        jacob_det = transform.jacobian_det(rv, aesara.shared(values))
        assert logpt(rv).ndim == jacob_det.ndim

        no_jac = logpt(rv, values, jacobian=False).eval()
        close_to(no_jac, logp_notrans.eval(), tol)
Esempio n. 9
0
def test_find_MAP_discrete():
    """MAP estimation in a model mixing continuous and discrete RVs."""
    tol = 2.0**-11
    a, b = 4, 4
    trials, successes = 20, 15

    with Model() as model:
        p = Beta("p", a, b)
        Binomial("ss", n=trials, p=p)
        Binomial("s", n=trials, p=p, observed=successes)

        # MAP over the default vars, then explicitly over all model vars.
        map_est1 = starting.find_MAP()
        map_est2 = starting.find_MAP(vars=model.vars)

    close_to(map_est1["p"], 0.6086956533498806, tol)
    close_to(map_est2["p"], 0.695642178810167, tol)
    assert map_est2["ss"] == 14
Esempio n. 10
0
    def check_vectortransform_elementwise_logp(self, model, vect_opt=0):
        """Check elementwise logp of a vector-transformed RV against the
        manual logp + Jacobian computation, with a relative tolerance."""
        untransformed = model.deterministics[0]
        rv = model.free_RVs[0]
        assert rv.logp_elemwiset.ndim == (rv.ndim - 1)

        point = model.test_point
        values = np.random.randn(*point[rv.name].shape)
        point[rv.name] = values
        dist = rv.distribution
        logp_nojac = untransformed.distribution.logp(
            dist.transform_used.backward(values))
        jacob_det = dist.transform_used.jacobian_det(aesara.shared(values))
        assert rv.logp_elemwiset.ndim == jacob_det.ndim

        if vect_opt == 0:
            # Univariate base distribution: collapse the last axis first.
            elementwiselogp = logp_nojac.sum(axis=-1) + jacob_det
        else:
            elementwiselogp = logp_nojac + jacob_det

        # Relative tolerance: scale tol by the mean magnitude of both sides.
        lhs = rv.logp_elemwise(point)
        rhs = elementwiselogp.eval()
        close_to(lhs, rhs, np.abs(0.5 * (lhs + rhs) * tol))
Esempio n. 11
0
def test_find_MAP():
    """Gradient-based and Powell MAP estimates both recover mean 0, sd 1."""
    tol = 2.0**-11  # 16 bit machine epsilon, a low bar
    data = np.random.randn(100)
    # Standardize so the sample really does have mean 0 and std 1.
    data = (data - np.mean(data)) / np.std(data)

    with Model():
        mu = Uniform("mu", -1, 1)
        sigma = Uniform("sigma", 0.5, 1.5)
        Normal("y", mu=mu, tau=sigma**-2, observed=data)

        map_est1 = starting.find_MAP(progressbar=False)  # gradient-based
        map_est2 = starting.find_MAP(progressbar=False, method="Powell")

    for estimate in (map_est1, map_est2):
        close_to(estimate["mu"], 0, tol)
        close_to(estimate["sigma"], 1, tol)
Esempio n. 12
0
def test_dlogp():
    """Gradient of the model logp matches the analytic normal gradient."""
    start, model, (mu, sig) = simple_model()
    grad_fn = model.fastdlogp()
    expected = -(start["x"] - mu) / sig ** 2
    close_to(grad_fn(start), expected, 1.0 / sig ** 2 / 100.0)
Esempio n. 13
0
def test_logp():
    """Model logp agrees with scipy's normal logpdf summed over the data."""
    start, model, (mu, sig) = simple_model()
    logp_fn = model.fastlogp
    logp_fn(start)  # first call; result discarded
    close_to(logp_fn(start), sp.norm.logpdf(start["x"], mu, sig).sum(), tol)
Esempio n. 14
0
def check_vals(fn1, fn2, *args):
    """Assert fn1 and fn2 agree on *args, with a dtype-dependent tolerance."""
    result = fn1(*args)
    tolerance = 1e-6 if result.dtype == np.float64 else 1e-4
    close_to(result, fn2(*args), tolerance)
Esempio n. 15
0
def test_accuracy_non_normal():
    """find_MAP converges to the known mode of the non-normal model."""
    _, model, (mu, _) = non_normal(4)
    with model:
        start = Point(x=[0.5, 0.01, 0.95, 0.99])
        newstart = find_MAP(start)
        close_to(newstart["x"], mu,
                 select_by_precision(float64=1e-5, float32=1e-4))
Esempio n. 16
0
def test_dlogp2():
    """Hessian of the logp equals the inverse covariance matrix."""
    start, model, (_, sig) = mv_simple()
    H = np.linalg.inv(sig)
    hess_fn = model.fastd2logp()
    close_to(hess_fn(start), H, np.abs(H / 100.0))
Esempio n. 17
0
def check_vals(fn1, fn2, *args):
    """Assert that fn1 and fn2 produce close results on *args."""
    result = fn1(*args)
    close_to(result, fn2(*args), 1e-6)
Esempio n. 18
0
def test_accuracy_normal():
    """find_MAP recovers the mode even from a distant starting point."""
    _, model, (mu, _) = simple_model()
    with model:
        far_start = Point(x=[-10.5, 100.5])
        newstart = find_MAP(far_start)
        close_to(newstart["x"], [mu, mu],
                 select_by_precision(float64=1e-5, float32=1e-4))