Example #1
def check_jacobian_det(
    transform,
    domain,
    constructor=at.dscalar,
    test=0,
    make_comparable=None,
    elemwise=False,
    rv_var=None,
):
    """Check transform.log_jac_det against the log-Jacobian-determinant
    obtained by autodiff through transform.backward."""
    y = constructor("y")
    y.tag.test_value = test

    if rv_var is None:
        rv_var = y

    rv_inputs = rv_var.owner.inputs if rv_var.owner else []

    x = transform.backward(y, *rv_inputs)
    if make_comparable:
        x = make_comparable(x)

    if not elemwise:
        jac = at.log(at.nlinalg.det(jacobian(x, [y])))
    else:
        jac = at.log(at.abs_(at.diag(jacobian(x, [y]))))

    # ljd = log jacobian det
    actual_ljd = aesara.function([y], jac)

    computed_ljd = aesara.function(
        [y], at.as_tensor_variable(transform.log_jac_det(y, *rv_inputs)), on_unused_input="ignore"
    )

    for yval in domain.vals:
        close_to(actual_ljd(yval), computed_ljd(yval), tol)
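Every example here leans on two helpers from the PyMC test suite that are not shown: the module-level tolerance tol and the assertion close_to. A minimal sketch of what they look like, assuming the float64/float32 split used elsewhere in the suite (the exact tolerance values are illustrative, not verbatim):

import aesara
import numpy as np

# Assumed tolerance: tighter under float64, looser under float32
# (illustrative values, not a verbatim copy of the test suite).
tol = 1e-7 if aesara.config.floatX == "float64" else 1e-6


def close_to(x, v, bound, name="value"):
    # Element-wise absolute-tolerance check: each entry of x must lie
    # within `bound` of the corresponding entry of v (or be exactly equal).
    assert np.all(np.logical_or(np.abs(x - v) < bound, x == v)), (
        f"{name} out of bounds: {x!r}, {v!r}, {bound!r}"
    )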
Example #2
    def check_vectortransform_elementwise_logp(self, model):
        x = model.free_RVs[0]
        x_val_transf = x.tag.value_var

        pt = model.initial_point(0)
        test_array_transf = floatX(
            np.random.randn(*pt[x_val_transf.name].shape))
        transform = x_val_transf.tag.transform
        test_array_untransf = transform.backward(test_array_transf,
                                                 *x.owner.inputs).eval()

        # Create input variable with same dimensionality as untransformed test_array
        x_val_untransf = at.constant(test_array_untransf).type()

        jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs)
        # Original distribution is univariate
        if x.owner.op.ndim_supp == 0:
            assert joint_logpt(
                x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1)
        # Original distribution is multivariate
        else:
            assert joint_logpt(
                x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim

        a = joint_logpt(x, x_val_transf,
                        jacobian=False).eval({x_val_transf: test_array_transf})
        b = joint_logpt(x, x_val_untransf, transformed=False).eval(
            {x_val_untransf: test_array_untransf})
        # Hack to get relative tolerance
        close_to(a, b, np.abs(0.5 * (a + b) * tol))
Example #3
def test_simplex_bounds():
    vals = get_values(tr.simplex, Vector(R, 2), at.dvector, np.array([0, 0]))

    close_to(vals.sum(axis=1), 1, tol)
    close_to_logical(vals > 0, True, tol)
    close_to_logical(vals < 1, True, tol)

    # make_comparable=lambda x: x[:-1] drops the last (dependent) simplex
    # coordinate so that the Jacobian passed to det() is square.
    check_jacobian_det(tr.simplex, Vector(R, 2), at.dvector, np.array([0, 0]), lambda x: x[:-1])
Example #4
def test_simplex_accuracy():
    val = np.array([-30])
    x = at.dvector("x")
    x.tag.test_value = val
    identity_f = aesara.function([x],
                                 tr.simplex.forward(x,
                                                    tr.simplex.backward(x, x)))
    close_to(val, identity_f(val), tol)
Example #5
def check_transform(transform, domain, constructor=at.dscalar, test=0, rv_var=None):
    """Check that transform.forward compiles and that transform.backward
    inverts it exactly over the given domain."""
    x = constructor("x")
    x.tag.test_value = test
    if rv_var is None:
        rv_var = x
    rv_inputs = rv_var.owner.inputs if rv_var.owner else []
    # test forward and forward_val
    # FIXME: What's being tested here?  That the transformed graph can compile?
    forward_f = aesara.function([x], transform.forward(x, *rv_inputs))
    # test transform identity
    identity_f = aesara.function(
        [x], transform.backward(transform.forward(x, *rv_inputs), *rv_inputs)
    )
    for val in domain.vals:
        close_to(val, identity_f(val), tol)
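A hypothetical invocation of check_transform, to show how the helper is meant to be driven (tr.log and the positive-reals domain Rplusbig are assumed fixtures from the same test module, not definitions made here):

# Round-trip the log transform over positive reals: backward(forward(x)) == x.
# `tr.log` and `Rplusbig` are assumed fixtures; `test=1.0` keeps the tagged
# test value inside the transform's domain.
check_transform(tr.log, Rplusbig, constructor=at.dscalar, test=1.0)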
Example #6
def test_find_MAP_discrete():
    tol = 2.0**-11
    alpha = 4
    beta = 4
    n = 20
    yes = 15

    with Model() as model:
        p = Beta("p", alpha, beta)
        Binomial("ss", n=n, p=p)
        Binomial("s", n=n, p=p, observed=yes)

        map_est1 = starting.find_MAP()
        map_est2 = starting.find_MAP(vars=model.value_vars)

    close_to(map_est1["p"], 0.6086956533498806, tol)

    close_to(map_est2["p"], 0.695642178810167, tol)
    assert map_est2["ss"] == 14
Example #7
    def check_transform_elementwise_logp(self, model):
        x = model.free_RVs[0]
        x0 = x.tag.value_var
        assert x.ndim == logpt(x, sum=False).ndim

        pt = model.initial_point
        array = np.random.randn(*pt[x0.name].shape)
        transform = x0.tag.transform
        logp_notrans = logpt(x,
                             transform.backward(array, *x.owner.inputs),
                             transformed=False)

        jacob_det = transform.log_jac_det(aesara.shared(array),
                                          *x.owner.inputs)
        assert logpt(x, sum=False).ndim == jacob_det.ndim

        v1 = logpt(x, array, jacobian=False).eval()
        v2 = logp_notrans.eval()
        close_to(v1, v2, tol)
Example #8
def test_find_MAP_discrete():
    tol = 2.0**-11
    alpha = 4
    beta = 4
    n = 20
    yes = 15

    with Model() as model:
        p = Beta('p', alpha, beta)
        ss = Binomial('ss', n=n, p=p)
        s = Binomial('s', n=n, p=p, observed=yes)

        map_est1 = starting.find_MAP()
        map_est2 = starting.find_MAP(vars=model.vars)

    close_to(map_est1['p'], 0.6086956533498806, tol)
    assert 'ss' not in map_est1

    close_to(map_est2['p'], 0.695642178810167, tol)
    assert map_est2['ss'] == 14
Example #9
    def check_transform_elementwise_logp(self, model):
        x = model.free_RVs[0]
        x_val_transf = x.tag.value_var

        pt = model.compute_initial_point(0)
        test_array_transf = floatX(np.random.randn(*pt[x_val_transf.name].shape))
        transform = x_val_transf.tag.transform
        test_array_untransf = transform.backward(test_array_transf, *x.owner.inputs).eval()

        # Create input variable with same dimensionality as untransformed test_array
        x_val_untransf = at.constant(test_array_untransf).type()

        jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs)
        assert joint_logpt(x, sum=False)[0].ndim == x.ndim == jacob_det.ndim

        v1 = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf})
        v2 = joint_logpt(x, x_val_untransf, transformed=False).eval(
            {x_val_untransf: test_array_untransf}
        )
        close_to(v1, v2, tol)
Example #10
    def check_vectortransform_elementwise_logp(self, model, vect_opt=0):
        x = model.free_RVs[0]
        x0 = x.tag.value_var
        # TODO: For some reason the ndim relations don't hold up here,
        # but the final log-probability values are as expected.
        # assert (x.ndim - 1) == logpt(x, sum=False).ndim

        pt = model.initial_point
        array = np.random.randn(*pt[x0.name].shape)
        transform = x0.tag.transform
        logp_nojac = logpt(x,
                           transform.backward(array, *x.owner.inputs),
                           transformed=False)

        jacob_det = transform.log_jac_det(aesara.shared(array),
                                          *x.owner.inputs)
        # assert logpt(x).ndim == jacob_det.ndim

        # Hack to get relative tolerance
        a = logpt(x, array.astype(aesara.config.floatX), jacobian=False).eval()
        b = logp_nojac.eval()
        close_to(a, b, np.abs(0.5 * (a + b) * tol))
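The "hack to get relative tolerance" in Examples #2 and #10 converts the absolute close_to check into a relative one: passing the bound |0.5 * (a + b)| * tol makes the assertion

\lvert a - b \rvert < \left\lvert \tfrac{a + b}{2} \right\rvert \cdot \mathrm{tol}
\iff
\frac{\lvert a - b \rvert}{\lvert (a + b)/2 \rvert} < \mathrm{tol},

i.e. a and b must agree to within a relative error of tol with respect to their mean (assuming the mean is nonzero).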
Example #11
def test_find_MAP():
    tol = 2.0**-11  # 16 bit machine epsilon, a low bar
    data = np.random.randn(100)
    # data should be roughly mean 0, std 1, but let's
    # normalize anyway to get it really close
    data = (data - np.mean(data)) / np.std(data)

    with Model():
        mu = Uniform("mu", -1, 1)
        sigma = Uniform("sigma", 0.5, 1.5)
        Normal("y", mu=mu, tau=sigma**-2, observed=data)

        # Test gradient minimization
        map_est1 = starting.find_MAP(progressbar=False)
        # Test non-gradient minimization
        map_est2 = starting.find_MAP(progressbar=False, method="Powell")

    close_to(map_est1["mu"], 0, tol)
    close_to(map_est1["sigma"], 1, tol)

    close_to(map_est2["mu"], 0, tol)
    close_to(map_est2["sigma"], 1, tol)
Example #12
def test_find_MAP():
    tol = 2.0**-11  # 16 bit machine epsilon, a low bar
    data = np.random.randn(100)
    # data should be roughly mean 0, std 1, but let's
    # normalize anyway to get it really close
    data = (data - np.mean(data)) / np.std(data)

    with Model() as model:
        mu = Uniform('mu', -1, 1)
        sigma = Uniform('sigma', 0.5, 1.5)
        y = Normal('y', mu=mu, tau=sigma**-2, observed=data)

        # Test gradient minimization
        map_est1 = starting.find_MAP()
        # Test non-gradient minimization
        map_est2 = starting.find_MAP(fmin=starting.optimize.fmin_powell)

    close_to(map_est1['mu'], 0, tol)
    close_to(map_est1['sigma'], 1, tol)

    close_to(map_est2['mu'], 0, tol)
    close_to(map_est2['sigma'], 1, tol)
Example #13
def check_vals(fn1, fn2, *args):
    """Check that fn1 and fn2 agree on the same inputs, using a
    dtype-dependent tolerance."""
    v = fn1(*args)
    close_to(v, fn2(*args), 1e-6 if v.dtype == np.float64 else 1e-4)
Example #14
def test_accuracy_non_normal():
    _, model, (mu, _) = non_normal(4)
    with model:
        newstart = find_MAP(Point(x=[0.5, 0.01, 0.95, 0.99]))
        close_to(newstart["x"], mu,
                 select_by_precision(float64=1e-5, float32=1e-4))
Example #15
def test_accuracy_normal():
    _, model, (mu, _) = simple_model()
    with model:
        newstart = find_MAP(Point(x=[-10.5, 100.5]))
        close_to(newstart["x"], [mu, mu],
                 select_by_precision(float64=1e-5, float32=1e-4))
Example #16
def test_dlogp2():
    start, model, (_, sig) = mv_simple()
    H = np.linalg.inv(sig)
    d2logp = model.fastd2logp()
    close_to(d2logp(start), H, np.abs(H / 100.0))
Example #17
def test_dlogp():
    start, model, (mu, sig) = simple_model()
    dlogp = model.fastdlogp()
    close_to(dlogp(start), -(start["x"] - mu) / sig**2, 1.0 / sig**2 / 100.0)
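test_dlogp2 and test_dlogp above check autodiff output against closed-form Gaussian derivatives. With a univariate density log p(x) = -(x - mu)^2 / (2 sigma^2) + C, the score and (multivariate) Hessian are

\frac{d}{dx}\log p(x) = -\frac{x - \mu}{\sigma^2},
\qquad
-\nabla^2 \log p(x) = \Sigma^{-1} = H,

which are exactly the reference values the two tests compare against.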
Example #18
def test_logp():
    start, model, (mu, sig) = simple_model()
    lp = model.fastlogp
    lp(start)
    close_to(lp(start), sp.norm.logpdf(start["x"], mu, sig).sum(), tol)
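The simple_model fixture unpacked by test_dlogp, test_logp, and test_accuracy_normal comes from the suite's shared test-model module and is not shown above. A minimal sketch, with illustrative parameter values (the exact numbers and the initial_point API are assumptions, not a verbatim copy):

def simple_model():
    # Sketch only: a 2-element iid Normal model. Returns the triple
    # (start_point, model, (mu, sigma)) that the tests above unpack;
    # the mu/tau values here are illustrative assumptions.
    mu, tau = -2.1, 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, size=2)
    return model.initial_point(), model, (mu, tau**-0.5)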