Example #1
    def test_jacobian(self, x, coefficient):
        factor = mp.Factor(lambda p: coefficient * p, p=x)

        assert factor.jacobian(
            {x: 2},
            [x],
        )[x] == pytest.approx(coefficient)
Example #2
def make_model_approx(centres, widths):
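    # One NormalMessage likelihood per observed centre plus a shared normal
    # factor over (mu, logt) for each latent centre; `n`, `normal_loglike_t`
    # and its jacobian are assumed to be defined at module level.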
    centres_ = [Variable(f"x_{i}") for i in range(n)]
    mu_ = Variable("mu")
    logt_ = Variable("logt")

    centre_likelihoods = [
        NormalMessage(c, w).as_factor(x)
        for c, w, x in zip(centres, widths, centres_)
    ]
    normal_likelihoods = [
        graph.Factor(
            normal_loglike_t,
            centre,
            mu_,
            logt_,
            factor_jacobian=normal_loglike_t_jacobian,
        ) for centre in centres_
    ]

    model = graph.utils.prod(centre_likelihoods + normal_likelihoods)

    model_approx = graph.EPMeanField.from_approx_dists(
        model,
        {
            mu_: NormalMessage(0, 10),
            logt_: NormalMessage(0, 10),
            **{x_: NormalMessage(0, 10)
               for x_ in centres_},
        },
    )

    return model_approx
Example #3
    def test_multivariate_message(self):
        p1, p2, p3 = mp.Plate(), mp.Plate(), mp.Plate()
        x_ = mp.Variable('x', p3, p1)
        y_ = mp.Variable('y', p1, p2)
        z_ = mp.Variable('z', p2, p3)

        n1, n2, n3 = shape = (2, 3, 4)
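        # x spans plates (p3, p1), y spans (p1, p2) and z spans (p2, p3);
        # sumxyz broadcasts them to a single (n1, n2, n3)-shaped array.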

        def sumxyz(x, y, z):
            return (np.moveaxis(x[:, :, None], 0, 2) + y[:, :, None] + z[None])

        factor = mp.Factor(sumxyz, x=x_, y=y_, z=z_)

        x = np.arange(n3 * n1).reshape(n3, n1) * 0.1
        y = np.arange(n1 * n2).reshape(n1, n2) * 0.2
        z = np.arange(n2 * n3).reshape(n2, n3) * 0.3
        sumxyz(x, y, z)

        variables = {x_: x, y_: y, z_: z}
        factor(variables)

        model_dist = mp.MeanField({
            x_: mp.NormalMessage(x, 1 * np.ones_like(x)),
            y_: mp.NormalMessage(y, 1 * np.ones_like(y)),
            z_: mp.NormalMessage(z, 1 * np.ones_like(z)),
        })

        assert model_dist(variables).log_value.shape == shape
Example #4
def make_linear_factor_jac(x_, a_, b_, z_):
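    # Built with vjp=True (vector-Jacobian products) rather than the explicit
    # factor_jacobian shown commented out below.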
    return graph.Factor(
        linear,
        x_,
        a_,
        b_,
        vjp=True,
        # factor_jacobian=linear_jacobian,
        factor_out=z_)
Example #5
def make_model():
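    # Linear-regression model: a deterministic factor z = x.dot(a) + b, a
    # normal likelihood on the observed y, and normal priors on a and b.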
    prior_norm = stats.norm(loc=0, scale=prior_std)

    def prior(x):
        return prior_norm.logpdf(x).sum()

    def linear(x, a, b):
        return x.dot(a) + b

    linear_factor = graph.Factor(
        linear, x_, a_, b_, 
        factor_out=z_, 
        vjp=True,
    )

    likelihood_factor = messages.NormalMessage(y, np.full_like(y, error_std)).as_factor(z_)
    prior_a = graph.Factor(prior, a_)
    prior_b = graph.Factor(prior, b_)

    model = likelihood_factor * linear_factor * prior_a * prior_b
    return model
Example #6
def test_full(data):
    samples = {
        Variable(f"samples_{i}"): sample
        for i, sample in enumerate(data)
    }
    x_i_ = [Variable(f"x_{i}") for i in range(n)]
    logt_i_ = [Variable(f"logt_{i}") for i in range(n)]

    mu_x_ = Variable("mu_x")
    logt_x_ = Variable("logt_x")
    mu_logt_ = Variable("mu_logt")
    logt_logt_ = Variable("logt_logt")
    hierarchical_params = (mu_x_, logt_x_, mu_logt_, logt_logt_)
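    # Each observation i gets its own centre x_i and log-precision logt_i,
    # tied together through the shared hierarchical parameters above.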

    # Setting up model
    data_loglikes = [
        graph.Factor(
            normal_loglike_t,
            s_,
            x_,
            logt_,
            factor_jacobian=normal_loglike_t_jacobian,
            name=f"normal_{i}",
        ) for i, (s_, x_, logt_) in enumerate(zip(samples, x_i_, logt_i_))
    ]
    centre_loglikes = [
        graph.Factor(normal_loglike_t, x_, mu_x_, logt_x_) for x_ in x_i_
    ]
    precision_loglikes = [
        graph.Factor(normal_loglike_t, logt_, mu_logt_, logt_logt_)
        for logt_ in logt_i_
    ]
    priors = [
        messages.NormalMessage(0, 10).as_factor(v, name=f"prior_{v.name}")
        for v in hierarchical_params
    ]
    model = graph.utils.prod(data_loglikes + centre_loglikes +
                             precision_loglikes + priors)
Example #7
    def test_plates(self):
        obs = autofit.mapper.variable.Plate(name='obs')
        dims = autofit.mapper.variable.Plate(name='dims')

        def sub(a, b):
            return a - b

        a = autofit.mapper.variable.Variable('a', obs, dims)
        b = autofit.mapper.variable.Variable('b', dims)

        subtract = mp.Factor(sub, a=a, b=b)

        x = np.array([[1, 2, 3], [4, 5, 6]])
        y = np.array([1, 2, 1])

        value = subtract({a: x, b: y}).log_value

        assert (value == x - y).all()
Example #8
def test_simple_transform_diagonal():
    # testing DiagonalTransform
    d = 5
    scale = np.random.exponential(size=d)
    A = np.diag(scale**-1)
    b = np.random.randn(d)

    x = graph.Variable('x', graph.Plate())
    x0 = np.random.randn(d)
    param_shapes = graph.utils.FlattenArrays({x: (d, )})

    def likelihood(x):
        x = x - b
        return 0.5 * np.linalg.multi_dot((x, A, x))

    factor = graph.Factor(likelihood, x=x, is_scalar=True)
    func = factor.flatten(param_shapes)

    res = optimize.minimize(func, x0)
    assert np.allclose(res.x, b, rtol=1e-2, atol=1e-2)
    H, iA = res.hess_inv, np.linalg.inv(A)
    # check R2 score
    assert 1 - np.square(H - iA).mean() / np.square(iA).mean() > 0.95

    scale = np.random.exponential(size=d)
    A = np.diag(scale**-2)
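    # With A = diag(scale**-2), whitening x by these scales should leave the
    # transformed problem with an identity Hessian.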

    diag = transform.DiagonalTransform(scale)
    whiten = transform.VariableTransform({x: diag})
    white_factor = transform.TransformedNode(factor, whiten)
    white_func = white_factor.flatten(param_shapes)

    y0 = diag * x0

    res = optimize.minimize(white_func, y0)
    assert np.allclose(res.x, diag * b)
    H, iA = res.hess_inv, np.eye(d)
    # check R2 score
    assert 1 - np.square(H - iA).mean() / np.square(iA).mean() > 0.95

    # testing gradients
    grad = white_func.jacobian(y0)
    ngrad = optimize.approx_fprime(y0, white_func, 1e-6)
    assert np.allclose(grad, ngrad, atol=1e-3, rtol=1e-3)
Example #9
def test_hierarchical(centres, widths):
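    # Infer the shared (mu, logt) of a hierarchical normal model by expectation
    # propagation, with a Laplace approximation for each factor update.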
    centres_ = [Variable(f"x_{i}") for i in range(n)]
    mu_ = Variable("mu")
    logt_ = Variable("logt")

    centre_likelihoods = [
        messages.NormalMessage(c, w).as_factor(x)
        for c, w, x in zip(centres, widths, centres_)
    ]

    hierarchical_factor = graph.Factor(
        hierarchical_loglike_t,
        mu_,
        logt_,
        *centres_,
        factor_jacobian=hierarchical_loglike_t_jac,
    )

    model = graph.utils.prod(centre_likelihoods) * hierarchical_factor

    model_approx = graph.EPMeanField.from_approx_dists(
        model,
        {
            mu_: messages.NormalMessage(0.0, 10.0),
            logt_: messages.NormalMessage(0.0, 10.0),
            **{x_: messages.NormalMessage(0.0, 10.0)
               for x_ in centres_},
        },
    )

    laplace = graph.LaplaceOptimiser()
    ep_opt = graph.EPOptimiser(model_approx, default_optimiser=laplace)
    new_approx = ep_opt.run(model_approx, max_steps=10)
    print(new_approx)

    mu_ = new_approx.factor_graph.name_variable_dict["mu"]
    logt_ = new_approx.factor_graph.name_variable_dict["logt"]

    assert new_approx.mean_field[mu_].mean == pytest.approx(np.mean(centres),
                                                            rel=0.2)
    assert new_approx.mean_field[logt_].mean == pytest.approx(np.log(
        np.std(centres)**-2),
                                                              rel=0.2)
Example #10
def make_flat_compound(plus, y, sigmoid):
    phi = graph.Factor(log_phi, y)
    return phi * plus * sigmoid
Example #11
def make_likelihood_factor(likelihood, z_, y_):
    return mp.Factor(likelihood, z=z_, y=y_)
Example #12
def make_prior_b(prior, b_):
    return mp.Factor(prior, x=b_)
Example #13
def make_prior_a(prior, a_):
    return mp.Factor(prior, x=a_)
Example #14
def make_linear_factor(
        x_, a_, b_, z_
):
    return mp.Factor(linear, x=x_, a=a_, b=b_) == z_
Example #15
def make_flat_compound(plus, y, sigmoid):
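    # `plus == y` marks y as the deterministic output of the plus factor.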
    g = plus == y
    phi = mp.Factor(log_phi, x=y)
    return phi * g * sigmoid
Example #16
def make_linear_factor(x_, a_, b_, z_):
    return graph.Factor(linear, x_, a_, b_, factor_out=z_)
Example #17
def make_likelihood_factor(likelihood, z_, y_):
    return graph.Factor(likelihood, z_, y_)
Example #18
def make_probit_factor(x):
    return graph.Factor(stats.norm(loc=0.0, scale=1.0).logcdf, x)
Example #19
def make_likelihood_factor_jac(z_, y_, obs, dims):
    factor = graph.Factor(likelihood, z_, y_, factor_jacobian=likelihood_jacobian)
    return factor
Example #20
def test_complex_transform():

    n1, n2, n3 = 2, 3, 2
    d = n1 + n2 * n3

    A = stats.wishart(d, np.eye(d)).rvs()
    b = np.random.rand(d)
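    # The objective is the quadratic form 0.5 * (x - b)' A (x - b), evaluated
    # on the concatenation of x1 and the flattened x2.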

    p1, p2, p3 = (graph.Plate() for i in range(3))
    x1 = graph.Variable('x1', p1)
    x2 = graph.Variable('x2', p2, p3)

    mean_field = graph.MeanField({
        x1:
        graph.NormalMessage(np.zeros(n1), 100 * np.ones(n1)),
        x2:
        graph.NormalMessage(np.zeros((n2, n3)), 100 * np.ones((n2, n3))),
    })

    values = mean_field.sample()
    param_shapes = graph.utils.FlattenArrays(
        {v: x.shape
         for v, x in values.items()})

    def likelihood(x1, x2):
        x = np.r_[x1, x2.ravel()] - b
        return 0.5 * np.linalg.multi_dot((x, A, x))

    factor = graph.Factor(likelihood, x1=x1, x2=x2, is_scalar=True)

    cho = transform.CholeskyTransform(linalg.cho_factor(A))
    whiten = transform.FullCholeskyTransform(cho, param_shapes)
    trans_factor = transform.TransformedNode(factor, whiten)

    values = mean_field.sample()
    transformed = whiten * values

    assert np.allclose(factor(values), trans_factor(transformed))

    njac = trans_factor._numerical_func_jacobian(transformed)[1]
    jac = trans_factor.jacobian(transformed)
    ngrad = param_shapes.flatten(njac)
    grad = param_shapes.flatten(jac)

    assert np.allclose(grad, ngrad, atol=1e-3, rtol=1e-3)

    # test VariableTransform with CholeskyTransform
    var_cov = {
        v: (X.reshape((int(X.size**0.5), ) * 2))
        for v, X in param_shapes.unflatten(linalg.inv(A)).items()
    }
    cho_factors = {
        v: transform.CholeskyTransform(linalg.cho_factor(linalg.inv(cov)))
        for v, cov in var_cov.items()
    }
    whiten = transform.VariableTransform(cho_factors)
    trans_factor = transform.TransformedNode(factor, whiten)

    values = mean_field.sample()
    transformed = whiten * values

    assert np.allclose(factor(values), trans_factor(transformed))

    njac = trans_factor._numerical_func_jacobian(transformed)[1]
    jac = trans_factor.jacobian(transformed)
    ngrad = param_shapes.flatten(njac)
    grad = param_shapes.flatten(jac)

    assert np.allclose(grad, ngrad, atol=1e-3, rtol=1e-3)

    res = optimize.minimize(trans_factor.flatten(param_shapes).func_jacobian,
                            param_shapes.flatten(transformed),
                            method='BFGS',
                            jac=True)
    assert res.hess_inv.diagonal() == pytest.approx(1., rel=1e-1)

    # test VariableTransform with DiagonalTransform
    diag_factors = {
        v: transform.DiagonalTransform(cov.diagonal()**0.5)
        for v, cov in var_cov.items()
    }
    whiten = transform.VariableTransform(diag_factors)
    trans_factor = transform.TransformedNode(factor, whiten)

    values = mean_field.sample()
    transformed = whiten * values

    assert np.allclose(factor(values), trans_factor(transformed))

    njac = trans_factor._numerical_func_jacobian(transformed)[1]
    jac = trans_factor.jacobian(transformed)
    ngrad = param_shapes.flatten(njac)
    grad = param_shapes.flatten(jac)

    assert np.allclose(grad, ngrad, atol=1e-3, rtol=1e-3)

    res = optimize.minimize(trans_factor.flatten(param_shapes).func_jacobian,
                            param_shapes.flatten(transformed),
                            method='BFGS',
                            jac=True)
    assert res.hess_inv.diagonal() == pytest.approx(1., rel=1e-1)
Example #21
def make_plus(x, y):
    return graph.Factor(plus_two, x, factor_out=y)
Example #22
def make_phi(x):
    return graph.Factor(log_phi, x)
Example #23
def make_vectorised_sigmoid(x):
    return graph.Factor(log_sigmoid, x)
Example #24
def make_sigmoid(x):
    return graph.Factor(log_sigmoid, x)
Example #25
def test_simple_transform_cholesky():

    np.random.seed(0)

    d = 5
    A = stats.wishart(d, np.eye(d)).rvs()
    b = np.random.rand(d)
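    # Quadratic objective 0.5 * (x - b)' A (x - b); its Hessian is the dense
    # precision A, so whitening by A's Cholesky factor should give an identity
    # Hessian at the optimum.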

    def likelihood(x):
        x = x - b
        return 0.5 * np.linalg.multi_dot((x, A, x))

    x = graph.Variable('x', graph.Plate())
    x0 = np.random.randn(d)

    factor = graph.Factor(likelihood, x=x, is_scalar=True)
    param_shapes = graph.utils.FlattenArrays({x: (d, )})
    func = factor.flatten(param_shapes)

    res = optimize.minimize(func, x0)
    assert np.allclose(res.x, b, rtol=1e-2)
    H, iA = res.hess_inv, np.linalg.inv(A)
    # check R2 score
    assert 1 - np.square(H - iA).mean() / np.square(iA).mean() > 0.95

    # cho = transform.CholeskyTransform(linalg.cho_factor(A))
    cho = transform.CholeskyTransform.from_dense(A)
    whiten = transform.VariableTransform({x: cho})
    white_factor = transform.TransformedNode(factor, whiten)
    white_func = white_factor.flatten(param_shapes)

    y0 = cho * x0

    res = optimize.minimize(white_func, y0)
    assert np.allclose(res.x, cho * b, atol=1e-3, rtol=1e-3)
    assert np.allclose(res.hess_inv, np.eye(d), atol=1e-3, rtol=1e-3)

    # testing gradients

    grad = white_func.jacobian(y0)
    ngrad = optimize.approx_fprime(y0, white_func, 1e-6)
    assert np.allclose(grad, ngrad, atol=1e-3, rtol=1e-3)

    # testing CovarianceTransform

    cho = transform.CovarianceTransform.from_dense(iA)
    whiten = transform.VariableTransform({x: cho})
    white_factor = transform.TransformedNode(factor, whiten)
    white_func = white_factor.flatten(param_shapes)

    y0 = cho * x0

    res = optimize.minimize(white_func, y0)
    assert np.allclose(res.x, cho * b, atol=1e-3, rtol=1e-3)
    assert np.allclose(res.hess_inv, np.eye(d), atol=1e-3, rtol=1e-3)

    # testing gradients

    grad = white_func.jacobian(y0)
    ngrad = optimize.approx_fprime(y0, white_func, 1e-6)
    assert np.allclose(grad, ngrad, atol=1e-3, rtol=1e-3)

    # testing FullCholeskyTransform

    whiten = transform.FullCholeskyTransform(cho, param_shapes)
    white_factor = transform.TransformedNode(factor, whiten)
    white_func = white_factor.flatten(param_shapes)

    y0 = cho * x0

    res = optimize.minimize(white_func, y0)
    assert np.allclose(res.x, cho * b, atol=1e-3, rtol=1e-3)
    assert np.allclose(res.hess_inv, np.eye(d), atol=1e-3, rtol=1e-3)

    # testing gradients

    grad = white_func.jacobian(y0)
    ngrad = optimize.approx_fprime(y0, white_func, 1e-6)
    assert np.allclose(grad, ngrad, atol=1e-3, rtol=1e-3)
Example #26
    def test_jacobian(self, x, coefficient):
        factor = graph.Factor(lambda p: coefficient * p, x)

        assert factor.jacobian({x: 2}).grad()[x] == pytest.approx(coefficient)
Example #27
def make_likelihood_factor(z_, y_, obs, dims):
    factor = graph.Factor(likelihood, z_, y_)
    return factor
Example #28
def make_prior_a(prior, a_):
    return graph.Factor(prior, a_)
Example #29
def _test():
    ## define parameters of model
    np.random.seed(1)

    alpha, beta, gamma, delta = 2 / 3, 4 / 3, 1, 1
    r = np.array([alpha, -gamma])
    A = np.array([[0., beta / alpha], [delta / gamma, 0.]])
    K = 1
    noise = 0.1

    # starting composition
    y0 = np.array([1., 1.])

    n_species = len(y0)
    n_obs = 30
    t_space = 1.
    t_obs = np.r_[0, (np.arange(n_obs - 1) * t_space +
                      np.random.rand(n_obs - 1)) * t_space]

    def lotka_volterra(t, z, r=r, A=A, K=K):
        return z * r * (1 - A.dot(z) / K)

    def calc_lotka_volterra(y0, r, A, K, t_obs):
        res = integrate.solve_ivp(lotka_volterra, (t_obs[0], t_obs[-1]),
                                  y0,
                                  t_eval=t_obs,
                                  args=(r, A, K),
                                  method='BDF')

        y_ = res.y
        n = y_.shape[1]
        n_obs = len(t_obs)
        # make sure output is correct dimension
        if n != n_obs:
            y_ = np.c_[y_, np.repeat(y_[:, [-1]], n_obs - n, axis=1)][:, :n_obs]
            if y_.shape[1] != n_obs:
                raise Exception

        return y_

    y_true = calc_lotka_volterra(y0, r, A, K, t_obs)
    y = y_true + noise * np.random.randn(n_species, n_obs)

    ## Specifying dimensions of problem
    obs = autofit.mapper.variable.Plate(name='obs')
    species = autofit.mapper.variable.Plate(name='species')
    # A is (species, species), so a second species plate is needed
    # to uniquely identify its second dimension
    speciesA = autofit.mapper.variable.Plate(name='species')
    dims = autofit.mapper.variable.Plate(name='dims')

    ## Specifying variables
    r_ = autofit.mapper.variable.Variable('r', species)
    A_ = autofit.mapper.variable.Variable('A', species, speciesA)
    K_ = autofit.mapper.variable.Variable('K')

    y0_ = autofit.mapper.variable.Variable('y0', species)
    y_ = autofit.mapper.variable.Variable('y', species, obs)

    y_obs_ = autofit.mapper.variable.Variable('y_obs', species, obs)
    t_obs_ = autofit.mapper.variable.Variable('t_obs', obs)

    _norm = stats.norm(loc=0, scale=noise)
    _prior = stats.norm(loc=0, scale=10)
    _prior_exp = stats.expon(loc=0, scale=1)

    def _likelihood(y_obs, y):
        return _norm.logpdf(y_obs - y)

    ## Specifying factors

    likelihood = mp.Factor(_likelihood, y_obs=y_obs_, y=y_)
    prior_A = mp.Factor(_prior.logpdf, 'prior_A', x=A_)
    prior_r = mp.Factor(_prior.logpdf, 'prior_r', x=r_)
    prior_y0 = mp.Factor(_prior_exp.logpdf, 'prior_y0', x=y0_)

    # calc_lotka_volterra does not vectorise over
    # multiple inputs, see `FactorNode._py_vec_call`
    LV = mp.Factor(calc_lotka_volterra,
                   'LV',
                   vectorised=False,
                   y0=y0_,
                   r=r_,
                   A=A_,
                   K=K_,
                   t_obs=t_obs_) == y_

    ## Defining model
    priors = prior_A * prior_r * prior_y0
    LV_model = (likelihood * LV) * priors
    LV_model._name = 'LV_model'

    model_approx = mp.EPMeanField.from_kws(
        LV_model,
        {
            A_:
            autofit.graphical.messages.normal.NormalMessage.from_mode(A, 100.),
            r_:
            autofit.graphical.messages.normal.NormalMessage.from_mode(r, 100.),
            y0_:
            autofit.graphical.messages.gamma.GammaMessage.from_mode(
                np.ones_like(y0), 1),
            y_:
            autofit.graphical.messages.normal.NormalMessage.from_mode(y, 1),
            K_:
            autofit.graphical.messages.fixed.FixedMessage(1),
            y_obs_:
            autofit.graphical.messages.fixed.FixedMessage(y),
            t_obs_:
            autofit.graphical.messages.fixed.FixedMessage(t_obs)
        },
    )
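    # FixedMessage keeps the observed data (y_obs, t_obs) and the carrying
    # capacity K fixed during message passing.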

    history = {}
    n_iter = 1

    factors = [f for f in LV_model.factors if f not in (LV, )]

    np.random.seed(1)

    opt = mp.optimise.LaplaceOptimiser(n_iter=n_iter)

    for i in range(n_iter):
        # perform least squares fit for LV model
        model_approx, status = mp.lstsq_laplace_factor_approx(model_approx, LV)

        # perform laplace non linear fit for other factors
        for factor in factors:
            model_approx, status = mp.optimise.laplace_factor_approx(
                model_approx,
                factor,
                status=status,
            )
            history[i, factor] = model_approx

    # model_mean = {v: d.mean for v, d in model_approx.mean_field.items()}
    # y_pred = LV_model(model_mean).deterministic_values[y_]
    y_pred = model_approx.mean_field[y_].mean

    assert np.square(y_pred - y).mean()**0.5 < 2
Example #30
def make_prior_b(prior, b_):
    return graph.Factor(prior, b_)