Example #1
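These snippets come from the symbolic-pymc test suite and omit their imports. Below is a minimal sketch of the shared setup they assume; the commented symbolic-pymc module paths are approximate reconstructions, not verified against a specific release.

# NOTE: the symbolic-pymc paths below are approximate and may differ
# across versions; only the numpy/theano/kanren imports are certain.
import numpy as np
import theano
import theano.tensor as tt

from kanren import run, var, eq

# from symbolic_pymc.theano.random_variables import (
#     NormalRV, MvNormalRV, HalfCauchyRV, observed)
# from symbolic_pymc.theano.meta import mt
# from symbolic_pymc.theano.printing import tt_pprint, tt_tprint
# from symbolic_pymc.theano.pymc3 import graph_model
# Relations such as `conjugate`, `scale_loc_transform`,
# `normal_qr_transform`, and `normal_normal_regression` live under
# `symbolic_pymc.relations.theano`.
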
def test_normal_qr_transform():
    np.random.seed(9283)

    N = 10
    M = 3
    X_tt = tt.matrix("X")
    X = np.random.normal(10, 1, size=N)
    X = np.c_[np.ones(N), X, X * X]
    X_tt.tag.test_value = X

    V_tt = tt.vector("V")
    V_tt.tag.test_value = np.ones(N)

    a_tt = tt.vector("a")
    R_tt = tt.vector("R")
    a_tt.tag.test_value = np.random.normal(size=M)
    R_tt.tag.test_value = np.abs(np.random.normal(size=M))

    beta_rv = NormalRV(a_tt, R_tt, name="\\beta")

    E_y_rv = X_tt.dot(beta_rv)
    E_y_rv.name = "E_y"
    Y_rv = NormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
    y_tt.name = "y"
    y_obs_rv = observed(y_tt, Y_rv)
    y_obs_rv.name = "y_obs"

    (res, ) = run(1, var("q"), normal_qr_transform(y_obs_rv, var("q")))

    new_node = {eval_and_reify_meta(k): eval_and_reify_meta(v) for k, v in res}

    # Make sure the old-to-new `beta` conversion is correct.
    t_Q, t_R = np.linalg.qr(X)
    Coef_new_value = np.linalg.inv(t_R)
    np.testing.assert_array_almost_equal(
        Coef_new_value, new_node[beta_rv].owner.inputs[0].tag.test_value)

    # Make sure the new `beta_tilde` has the right standard normal distribution
    # parameters.
    beta_tilde_node = new_node[beta_rv].owner.inputs[1]
    np.testing.assert_array_almost_equal(
        np.r_[0.0, 0.0, 0.0], beta_tilde_node.owner.inputs[0].tag.test_value)
    np.testing.assert_array_almost_equal(
        np.r_[1.0, 1.0, 1.0], beta_tilde_node.owner.inputs[1].tag.test_value)

    Y_new = new_node[y_obs_rv].owner.inputs[1]
    assert Y_new.owner.inputs[0].owner.inputs[1] == beta_tilde_node

    np.testing.assert_array_almost_equal(
        t_Q, Y_new.owner.inputs[0].owner.inputs[0].tag.test_value)
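
The transform rests on a plain linear-algebra identity: with X = QR, X.dot(beta) = Q.dot(R.dot(beta)), so beta_tilde = R.dot(beta) is the transformed coefficient and beta = inv(R).dot(beta_tilde) recovers the original, which is exactly what the `np.linalg.inv(t_R)` check above verifies. A standalone NumPy check of that identity (no symbolic-pymc required):

import numpy as np

np.random.seed(9283)
x = np.random.normal(10, 1, size=10)
X = np.c_[np.ones(10), x, x * x]

Q, R = np.linalg.qr(X)
beta = np.random.normal(size=3)

# X @ beta == Q @ (R @ beta): `R @ beta` plays the role of `beta_tilde`,
# and `inv(R)` maps it back to the original `beta`.
np.testing.assert_allclose(X @ beta, Q @ (R @ beta))
np.testing.assert_allclose(beta, np.linalg.inv(R) @ (R @ beta))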
Example #2
def test_mvnormal_conjugate():
    """Test that we can produce the closed-form distribution for the conjugate
    multivariate normal-regression with normal-prior model.
    """

    tt.config.cxx = ""
    tt.config.compute_test_value = "ignore"

    a_tt = tt.vector("a")
    R_tt = tt.matrix("R")
    F_t_tt = tt.matrix("F")
    V_tt = tt.matrix("V")

    a_tt.tag.test_value = np.r_[1.0, 0.0]
    R_tt.tag.test_value = np.diag([10.0, 10.0])
    F_t_tt.tag.test_value = np.c_[-2.0, 1.0]
    V_tt.tag.test_value = np.diag([0.5])

    beta_rv = MvNormalRV(a_tt, R_tt, name="\\beta")

    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(np.r_[-3.0])
    y_tt.name = "y"
    Y_obs = observed(y_tt, Y_rv)

    q_lv = var()

    (expr_graph, ) = run(1, q_lv, walko(conjugate, Y_obs, q_lv))

    fgraph_opt = expr_graph.eval_obj
    fgraph_opt_tt = fgraph_opt.reify()

    # Check that the SSE has decreased from prior to posterior.
    # TODO: Use a better test.
    beta_prior_mean_val = a_tt.tag.test_value
    F_val = F_t_tt.tag.test_value
    beta_post_mean_val = fgraph_opt_tt.owner.inputs[0].tag.test_value
    priorp_err = np.square(y_tt.data - F_val.dot(beta_prior_mean_val)).sum()
    postp_err = np.square(y_tt.data - F_val.dot(beta_post_mean_val)).sum()

    # First, make sure the prior and posterior means are simply not equal.
    with pytest.raises(AssertionError):
        np.testing.assert_array_equal(priorp_err, postp_err)

    # Now, make sure there's a decrease (relative to the observed point).
    np.testing.assert_array_less(postp_err, priorp_err)
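
The closed form the `conjugate` relation should produce is the standard linear-Gaussian update: for beta ~ N(a, R) and y ~ N(F beta, V), the posterior mean is a + R F^T (F R F^T + V)^{-1} (y - F a). A NumPy sketch of that update with the test's values, showing the SSE decrease the test asserts:

import numpy as np

a = np.r_[1.0, 0.0]        # prior mean
R = np.diag([10.0, 10.0])  # prior covariance
F = np.c_[-2.0, 1.0]       # design matrix, shape (1, 2)
V = np.diag([0.5])         # observation covariance
y = np.r_[-3.0]

S = F @ R @ F.T + V             # marginal covariance of y
K = R @ F.T @ np.linalg.inv(S)  # gain, shape (2, 1)
post_mean = a + K @ (y - F @ a)
post_cov = R - K @ F @ R

# The posterior mean fits the observation better than the prior mean.
assert np.square(y - F @ post_mean).sum() < np.square(y - F @ a).sum()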
Example #3
def test_mvnormal_mvnormal():
    """Test that we can produce the closed-form distribution for the conjugate
    multivariate normal-regression with normal-prior model.
    """
    tt.config.cxx = ''
    tt.config.compute_test_value = 'ignore'

    a_tt = tt.vector('a')
    R_tt = tt.matrix('R')
    F_t_tt = tt.matrix('F')
    V_tt = tt.matrix('V')

    a_tt.tag.test_value = np.r_[1., 0.]
    R_tt.tag.test_value = np.diag([10., 10.])
    F_t_tt.tag.test_value = np.c_[-2., 1.]
    V_tt.tag.test_value = np.diag([0.5])

    beta_rv = MvNormalRV(a_tt, R_tt, name='\\beta')

    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name='Y')

    y_tt = tt.as_tensor_variable(np.r_[-3.])
    y_tt.name = 'y'
    Y_obs = observed(y_tt, Y_rv)

    q_lv = var()

    expr_graph, = run(1, q_lv,
                      (tt_graph_applyo, conjugate, Y_obs, q_lv))

    fgraph_opt = expr_graph.eval_obj
    fgraph_opt_tt = fgraph_opt.reify()

    # Check that the SSE has decreased from prior to posterior.
    # TODO: Use a better test.
    beta_prior_mean_val = a_tt.tag.test_value
    F_val = F_t_tt.tag.test_value
    beta_post_mean_val = fgraph_opt_tt.owner.inputs[0].tag.test_value
    priorp_err = np.square(
        y_tt.data - F_val.dot(beta_prior_mean_val)).sum()
    postp_err = np.square(
        y_tt.data - F_val.dot(beta_post_mean_val)).sum()

    # First, make sure the prior and posterior means are simply not equal.
    np.testing.assert_raises(
        AssertionError, np.testing.assert_array_equal,
        priorp_err, postp_err)
    # Now, make sure there's a decrease (relative to the observed point).
    np.testing.assert_array_less(postp_err, priorp_err)
Example #4
def test_scale_loc_transform():
    tt.config.compute_test_value = "ignore"

    rand_state = theano.shared(np.random.RandomState())
    mu_a = NormalRV(0.0, 100**2, name="mu_a", rng=rand_state)
    sigma_a = HalfCauchyRV(5, name="sigma_a", rng=rand_state)
    mu_b = NormalRV(0.0, 100**2, name="mu_b", rng=rand_state)
    sigma_b = HalfCauchyRV(5, name="sigma_b", rng=rand_state)
    county_idx = np.r_[1, 1, 2, 3]
    # We want the following for a, b:
    # N(m, S) -> m + N(0, 1) * S
    a = NormalRV(mu_a,
                 sigma_a,
                 size=(len(county_idx), ),
                 name="a",
                 rng=rand_state)
    b = NormalRV(mu_b,
                 sigma_b,
                 size=(len(county_idx), ),
                 name="b",
                 rng=rand_state)
    radon_est = a[county_idx] + b[county_idx] * 7
    eps = HalfCauchyRV(5, name="eps", rng=rand_state)
    radon_like = NormalRV(radon_est, eps, name="radon_like", rng=rand_state)
    radon_like_rv = observed(tt.as_tensor_variable(np.r_[1.0, 2.0, 3.0, 4.0]),
                             radon_like)

    q_lv = var()

    (expr_graph, ) = run(
        1, q_lv,
        non_obs_walko(partial(reduceo, scale_loc_transform), radon_like_rv,
                      q_lv))

    radon_like_rv_opt = expr_graph.reify()

    assert radon_like_rv_opt.owner.op == observed

    radon_like_opt = radon_like_rv_opt.owner.inputs[1]
    radon_est_opt = radon_like_opt.owner.inputs[0]

    # These should now be `tt.add(mu_*, ...)` outputs.
    a_opt = radon_est_opt.owner.inputs[0].owner.inputs[0]
    b_opt = radon_est_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    # Make sure NormalRV gets replaced with an addition
    assert a_opt.owner.op == tt.add
    assert b_opt.owner.op == tt.add

    # Make sure the first term in the addition is the old NormalRV mean
    mu_a_opt = a_opt.owner.inputs[0].owner.inputs[0]
    assert "mu_a" == mu_a_opt.name == mu_a.name
    mu_b_opt = b_opt.owner.inputs[0].owner.inputs[0]
    assert "mu_b" == mu_b_opt.name == mu_b.name

    # Make sure the second term in the addition is the standard NormalRV times
    # the old std. dev.
    assert a_opt.owner.inputs[1].owner.op == tt.mul
    assert b_opt.owner.inputs[1].owner.op == tt.mul

    sigma_a_opt = a_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_a_opt.owner.op == sigma_a.owner.op
    sigma_b_opt = b_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_b_opt.owner.op == sigma_b.owner.op

    a_std_norm_opt = a_opt.owner.inputs[1].owner.inputs[1]
    assert a_std_norm_opt.owner.op == NormalRV
    assert a_std_norm_opt.owner.inputs[0].data == 0.0
    assert a_std_norm_opt.owner.inputs[1].data == 1.0
    b_std_norm_opt = b_opt.owner.inputs[1].owner.inputs[1]
    assert b_std_norm_opt.owner.op == NormalRV
    assert b_std_norm_opt.owner.inputs[0].data == 0.0
    assert b_std_norm_opt.owner.inputs[1].data == 1.0
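
The rewrite the test walks the graph for is the usual non-centered ("scale-loc") reparameterization, N(m, s) = m + s * N(0, 1), as the comment at the top of the test states. A quick NumPy sanity check that the two forms agree in distribution:

import numpy as np

rng = np.random.RandomState(9283)
m, s = 1.5, 2.0

centered = rng.normal(m, s, size=200000)
non_centered = m + s * rng.normal(0.0, 1.0, size=200000)

# Same first two moments, up to Monte Carlo error.
np.testing.assert_allclose(centered.mean(), non_centered.mean(), atol=0.05)
np.testing.assert_allclose(centered.std(), non_centered.std(), atol=0.05)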
Example #5
def test_normal_normal_regression():
    tt.config.compute_test_value = "ignore"
    theano.config.cxx = ""
    np.random.seed(9283)

    N = 10
    M = 3
    a_tt = tt.vector("a")
    R_tt = tt.vector("R")
    X_tt = tt.matrix("X")
    V_tt = tt.vector("V")

    a_tt.tag.test_value = np.random.normal(size=M)
    R_tt.tag.test_value = np.abs(np.random.normal(size=M))
    X = np.random.normal(10, 1, size=N)
    X = np.c_[np.ones(N), X, X * X]
    X_tt.tag.test_value = X
    V_tt.tag.test_value = np.ones(N)

    beta_rv = NormalRV(a_tt, R_tt, name="\\beta")

    E_y_rv = X_tt.dot(beta_rv)
    E_y_rv.name = "E_y"
    Y_rv = NormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
    y_tt.name = "y"
    y_obs_rv = observed(y_tt, Y_rv)
    y_obs_rv.name = "y_obs"

    #
    # Use the relation to identify/match `Y`, `X` and `beta`.
    #
    y_args_tail_lv, b_args_tail_lv = var(), var()
    beta_lv = var()

    y_args_lv, y_lv, Y_lv, X_lv = var(), var(), var(), var()
    (res, ) = run(
        1,
        (beta_lv, y_args_tail_lv, b_args_tail_lv),
        applyo(mt.observed, y_args_lv, y_obs_rv),
        eq(y_args_lv, (y_lv, Y_lv)),
        normal_normal_regression(Y_lv, X_lv, beta_lv, y_args_tail_lv,
                                 b_args_tail_lv),
    )

    # TODO FIXME: This would work if non-op parameters (e.g. names) were covered by
    # `operator`/`car`.  See `TheanoMetaOperator`.
    assert res[0].eval_obj.obj == beta_rv
    assert res[0] == etuplize(beta_rv)
    assert res[1] == etuplize(Y_rv)[2:]
    assert res[2] == etuplize(beta_rv)[1:]

    #
    # Use the relation to produce `Y` from the given `X` and `beta`.
    #
    X_new_mt = mt(tt.eye(N, M))
    beta_new_mt = mt(NormalRV(0, 1, size=M))
    Y_args_cdr_mt = etuplize(Y_rv)[2:]
    Y_lv = var()
    (res, ) = run(
        1, Y_lv,
        normal_normal_regression(Y_lv, X_new_mt, beta_new_mt, Y_args_cdr_mt))
    Y_out_mt = res.eval_obj

    Y_new_mt = etuple(mt.NormalRV, mt.dot(X_new_mt,
                                          beta_new_mt)) + Y_args_cdr_mt
    Y_new_mt = Y_new_mt.eval_obj

    assert Y_out_mt == Y_new_mt
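
The two `run` calls above use the same relation in opposite directions: first to match `beta` out of an existing graph, then to generate a new `Y` from a given `X` and `beta`. That bidirectionality is ordinary miniKanren behavior, illustrated here with a toy kanren relation (not part of symbolic-pymc):

from kanren import run, var, eq, conde

def pairo(x, y):
    # A toy relation: (x, y) drawn from a fixed table of pairs.
    return conde([eq(x, 1), eq(y, 2)], [eq(x, 3), eq(y, 4)])

q = var()
assert run(1, q, pairo(1, q)) == (2,)  # forward: given x, solve for y
assert run(1, q, pairo(q, 4)) == (3,)  # backward: given y, solve for x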
Example #6
def test_normals_to_model():
    """Test conversion to a PyMC3 model."""
    tt.config.compute_test_value = 'ignore'

    a_tt = tt.vector('a')
    R_tt = tt.matrix('R')
    F_t_tt = tt.matrix('F')
    V_tt = tt.matrix('V')

    a_tt.tag.test_value = np.r_[1., 0.]
    R_tt.tag.test_value = np.diag([10., 10.])
    F_t_tt.tag.test_value = np.c_[-2., 1.]
    V_tt.tag.test_value = np.diag([0.5])

    beta_rv = MvNormalRV(a_tt, R_tt, name='\\beta')

    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name='Y')

    y_val = np.r_[-3.]

    def _check_model(model):
        assert len(model.observed_RVs) == 1
        assert model.observed_RVs[0].name == 'Y'
        Y_pm = model.observed_RVs[0].distribution
        assert isinstance(Y_pm, pm.MvNormal)
        np.testing.assert_array_equal(model.observed_RVs[0].observations.data,
                                      y_val)
        assert Y_pm.mu.owner.op == tt.basic._dot
        assert Y_pm.cov.name == 'V'
        assert len(model.unobserved_RVs) == 1
        assert model.unobserved_RVs[0].name == '\\beta'
        beta_pm = model.unobserved_RVs[0].distribution
        assert isinstance(beta_pm, pm.MvNormal)

    y_tt = theano.shared(y_val, name='y')
    Y_obs = observed(y_tt, Y_rv)

    fgraph = FunctionGraph(tt_inputs([beta_rv, Y_obs]), [beta_rv, Y_obs],
                           clone=True)

    model = graph_model(fgraph)

    _check_model(model)

    # Now, let `graph_model` create the `FunctionGraph`
    model = graph_model(Y_obs)

    _check_model(model)

    # Use a different type of observation value
    y_tt = tt.as_tensor_variable(y_val, name='y')
    Y_obs = observed(y_tt, Y_rv)

    model = graph_model(Y_obs)

    _check_model(model)

    # Use an invalid type of observation value
    tt.config.compute_test_value = 'ignore'
    y_tt = tt.vector('y')
    Y_obs = observed(y_tt, Y_rv)

    with pytest.raises(TypeError):
        model = graph_model(Y_obs)
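
For reference, the containers `_check_model` inspects (`observed_RVs`, `unobserved_RVs`, `.distribution`, `.observations`) are the standard PyMC3 `Model` attributes. Here is a hand-built PyMC3 model with the same structure as the converted graph; a sketch, assuming PyMC3 3.x:

import numpy as np
import pymc3 as pm

with pm.Model() as model:
    beta = pm.MvNormal("beta", mu=np.r_[1.0, 0.0],
                       cov=np.diag([10.0, 10.0]), shape=2)
    F = np.c_[-2.0, 1.0]
    pm.MvNormal("Y", mu=pm.math.dot(F, beta), cov=np.diag([0.5]),
                observed=np.r_[-3.0])

assert model.observed_RVs[0].name == "Y"
assert model.free_RVs[0].name == "beta"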
Example #7
def test_notex_print():

    tt_normalrv_noname_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"))
    expected = textwrap.dedent(r"""
    b in R, \mu in R, \sigma in R
    a ~ N(\mu, \sigma**2) in R
    (b * a)
    """)
    assert tt_pprint(tt_normalrv_noname_expr) == expected.strip()

    # Make sure the constant shape is shown in values and not symbols.
    tt_normalrv_name_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"), size=[2, 1], name="X")
    expected = textwrap.dedent(r"""
    b in R, \mu in R, \sigma in R
    X ~ N(\mu, \sigma**2) in R**(2 x 1)
    (b * X)
    """)
    assert tt_pprint(tt_normalrv_name_expr) == expected.strip()

    tt_2_normalrv_noname_expr = tt.matrix("M") * NormalRV(
        tt.scalar("\\mu_2"), tt.scalar("\\sigma_2"))
    tt_2_normalrv_noname_expr *= tt.scalar("b") * NormalRV(
        tt_2_normalrv_noname_expr, tt.scalar("\\sigma")) + tt.scalar("c")
    expected = textwrap.dedent(r"""
    M in R**(N^M_0 x N^M_1), \mu_2 in R, \sigma_2 in R
    b in R, \sigma in R, c in R
    a ~ N(\mu_2, \sigma_2**2) in R, d ~ N((M * a), \sigma**2) in R**(N^d_0 x N^d_1)
    ((M * a) * ((b * d) + c))
    """)
    assert tt_pprint(tt_2_normalrv_noname_expr) == expected.strip()

    expected = textwrap.dedent(r"""
    b in Z, c in Z, M in R**(N^M_0 x N^M_1)
    M[b, c]
    """)
    # TODO: "c" should be "1".
    assert (tt_pprint(
        tt.matrix("M")[tt.iscalar("a"),
                       tt.constant(1, dtype="int")]) == expected.strip())

    expected = textwrap.dedent(r"""
    M in R**(N^M_0 x N^M_1)
    M[1]
    """)
    assert tt_pprint(tt.matrix("M")[1]) == expected.strip()

    expected = textwrap.dedent(r"""
    M in N**(N^M_0)
    M[2:4:0]
    """)
    assert tt_pprint(tt.vector("M", dtype="uint32")[0:4:2]) == expected.strip()

    norm_rv = NormalRV(tt.scalar("\\mu"), tt.scalar("\\sigma"))
    rv_obs = observed(tt.constant(1.0, dtype=norm_rv.dtype), norm_rv)

    expected = textwrap.dedent(r"""
    \mu in R, \sigma in R
    a ~ N(\mu, \sigma**2) in R
    a = 1.0
        """)
    assert tt_pprint(rv_obs) == expected.strip()
Example #8
def test_tex_print():

    tt_normalrv_noname_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"))
    expected = textwrap.dedent(r"""
    \begin{equation}
      \begin{gathered}
      b \in \mathbb{R}, \,\mu \in \mathbb{R}, \,\sigma \in \mathbb{R}
      \\
      a \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\,  \in \mathbb{R}
      \end{gathered}
      \\
      (b \odot a)
    \end{equation}
    """)
    assert tt_tprint(tt_normalrv_noname_expr) == expected.strip()

    tt_normalrv_name_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"), size=[2, 1], name="X")
    expected = textwrap.dedent(r"""
    \begin{equation}
      \begin{gathered}
      b \in \mathbb{R}, \,\mu \in \mathbb{R}, \,\sigma \in \mathbb{R}
      \\
      X \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\,  \in \mathbb{R}^{2 \times 1}
      \end{gathered}
      \\
      (b \odot X)
    \end{equation}
    """)
    assert tt_tprint(tt_normalrv_name_expr) == expected.strip()

    tt_2_normalrv_noname_expr = tt.matrix("M") * NormalRV(
        tt.scalar("\\mu_2"), tt.scalar("\\sigma_2"))
    tt_2_normalrv_noname_expr *= tt.scalar("b") * NormalRV(
        tt_2_normalrv_noname_expr, tt.scalar("\\sigma")) + tt.scalar("c")
    expected = textwrap.dedent(r"""
    \begin{equation}
      \begin{gathered}
      M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}}
      \\
      \mu_2 \in \mathbb{R}, \,\sigma_2 \in \mathbb{R}
      \\
      b \in \mathbb{R}, \,\sigma \in \mathbb{R}, \,c \in \mathbb{R}
      \\
      a \sim \operatorname{N}\left(\mu_2, {\sigma_2}^{2}\right)\,  \in \mathbb{R}
      \\
      d \sim \operatorname{N}\left((M \odot a), {\sigma}^{2}\right)\,  \in \mathbb{R}^{N^{d}_{0} \times N^{d}_{1}}
      \end{gathered}
      \\
      ((M \odot a) \odot ((b \odot d) + c))
    \end{equation}
    """)
    assert tt_tprint(tt_2_normalrv_noname_expr) == expected.strip()

    expected = textwrap.dedent(r"""
    \begin{equation}
      \begin{gathered}
      b \in \mathbb{Z}, \,c \in \mathbb{Z}, \,M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}}
      \end{gathered}
      \\
      M\left[b, \,c\right]
    \end{equation}
    """)
    # TODO: "c" should be "1".
    assert (tt_tprint(
        tt.matrix("M")[tt.iscalar("a"),
                       tt.constant(1, dtype="int")]) == expected.strip())

    expected = textwrap.dedent(r"""
    \begin{equation}
      \begin{gathered}
      M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}}
      \end{gathered}
      \\
      M\left[1\right]
    \end{equation}
    """)
    assert tt_tprint(tt.matrix("M")[1]) == expected.strip()

    expected = textwrap.dedent(r"""
    \begin{equation}
      \begin{gathered}
      M \in \mathbb{N}^{N^{M}_{0}}
      \end{gathered}
      \\
      M\left[2:4:0\right]
    \end{equation}
    """)
    assert tt_tprint(tt.vector("M", dtype="uint32")[0:4:2]) == expected.strip()

    norm_rv = NormalRV(tt.scalar("\\mu"), tt.scalar("\\sigma"))
    rv_obs = observed(tt.constant(1.0, dtype=norm_rv.dtype), norm_rv)

    expected = textwrap.dedent(r"""
    \begin{equation}
      \begin{gathered}
      \mu \in \mathbb{R}, \,\sigma \in \mathbb{R}
      \\
      a \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\,  \in \mathbb{R}
      \end{gathered}
      \\
      a = 1.0
    \end{equation}
        """)
    assert tt_tprint(rv_obs) == expected.strip()
Example #9
def test_pymc_normals():
    tt.config.compute_test_value = 'ignore'

    rand_state = theano.shared(np.random.RandomState())
    mu_a = NormalRV(0., 100**2, name='mu_a', rng=rand_state)
    sigma_a = HalfCauchyRV(5, name='sigma_a', rng=rand_state)
    mu_b = NormalRV(0., 100**2, name='mu_b', rng=rand_state)
    sigma_b = HalfCauchyRV(5, name='sigma_b', rng=rand_state)
    county_idx = np.r_[1, 1, 2, 3]
    # We want the following for a, b:
    # N(m, S) -> m + N(0, 1) * S
    a = NormalRV(mu_a,
                 sigma_a,
                 size=(len(county_idx), ),
                 name='a',
                 rng=rand_state)
    b = NormalRV(mu_b,
                 sigma_b,
                 size=(len(county_idx), ),
                 name='b',
                 rng=rand_state)
    radon_est = a[county_idx] + b[county_idx] * 7
    eps = HalfCauchyRV(5, name='eps', rng=rand_state)
    radon_like = NormalRV(radon_est, eps, name='radon_like', rng=rand_state)
    radon_like_rv = observed(tt.as_tensor_variable(np.r_[1., 2., 3., 4.]),
                             radon_like)

    graph_mt = mt(radon_like_rv)
    expr_graph, = run(
        1, var('q'),
        non_obs_fixedp_graph_applyo(scale_loc_transform, graph_mt, var('q')))

    radon_like_rv_opt = expr_graph.reify()

    assert radon_like_rv_opt.owner.op == observed

    radon_like_opt = radon_like_rv_opt.owner.inputs[1]
    radon_est_opt = radon_like_opt.owner.inputs[0]

    # These should now be `tt.add(mu_*, ...)` outputs.
    a_opt = radon_est_opt.owner.inputs[0].owner.inputs[0]
    b_opt = radon_est_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    # Make sure NormalRV gets replaced with an addition
    assert a_opt.owner.op == tt.add
    assert b_opt.owner.op == tt.add

    # Make sure the first term in the addition is the old NormalRV mean
    mu_a_opt = a_opt.owner.inputs[0].owner.inputs[0]
    assert 'mu_a' == mu_a_opt.name == mu_a.name
    mu_b_opt = b_opt.owner.inputs[0].owner.inputs[0]
    assert 'mu_b' == mu_b_opt.name == mu_b.name

    # Make sure the second term in the addition is the standard NormalRV times
    # the old std. dev.
    assert a_opt.owner.inputs[1].owner.op == tt.mul
    assert b_opt.owner.inputs[1].owner.op == tt.mul

    sigma_a_opt = a_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_a_opt.owner.op == sigma_a.owner.op
    sigma_b_opt = b_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_b_opt.owner.op == sigma_b.owner.op

    a_std_norm_opt = a_opt.owner.inputs[1].owner.inputs[1]
    assert a_std_norm_opt.owner.op == NormalRV
    assert a_std_norm_opt.owner.inputs[0].data == 0.0
    assert a_std_norm_opt.owner.inputs[1].data == 1.0
    b_std_norm_opt = b_opt.owner.inputs[1].owner.inputs[1]
    assert b_std_norm_opt.owner.op == NormalRV
    assert b_std_norm_opt.owner.inputs[0].data == 0.0
    assert b_std_norm_opt.owner.inputs[1].data == 1.0