def test_normal_qr_transform():
    np.random.seed(9283)

    N = 10
    M = 3

    X_tt = tt.matrix("X")
    X = np.random.normal(10, 1, size=N)
    X = np.c_[np.ones(10), X, X * X]
    X_tt.tag.test_value = X

    V_tt = tt.vector("V")
    V_tt.tag.test_value = np.ones(N)

    a_tt = tt.vector("a")
    R_tt = tt.vector("R")
    a_tt.tag.test_value = np.random.normal(size=M)
    R_tt.tag.test_value = np.abs(np.random.normal(size=M))

    beta_rv = NormalRV(a_tt, R_tt, name="\\beta")

    E_y_rv = X_tt.dot(beta_rv)
    E_y_rv.name = "E_y"
    Y_rv = NormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
    y_tt.name = "y"
    y_obs_rv = observed(y_tt, Y_rv)
    y_obs_rv.name = "y_obs"

    (res, ) = run(1, var("q"), normal_qr_transform(y_obs_rv, var("q")))

    new_node = {eval_and_reify_meta(k): eval_and_reify_meta(v) for k, v in res}

    # Make sure the old-to-new `beta` conversion is correct.
    t_Q, t_R = np.linalg.qr(X)
    Coef_new_value = np.linalg.inv(t_R)
    np.testing.assert_array_almost_equal(
        Coef_new_value, new_node[beta_rv].owner.inputs[0].tag.test_value)

    # Make sure the new `beta_tilde` has the right standard normal distribution
    # parameters.
    beta_tilde_node = new_node[beta_rv].owner.inputs[1]
    np.testing.assert_array_almost_equal(
        np.r_[0.0, 0.0, 0.0], beta_tilde_node.owner.inputs[0].tag.test_value)
    np.testing.assert_array_almost_equal(
        np.r_[1.0, 1.0, 1.0], beta_tilde_node.owner.inputs[1].tag.test_value)

    Y_new = new_node[y_obs_rv].owner.inputs[1]
    assert Y_new.owner.inputs[0].owner.inputs[1] == beta_tilde_node

    np.testing.assert_array_almost_equal(
        t_Q, Y_new.owner.inputs[0].owner.inputs[0].tag.test_value)

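# A standalone NumPy sketch (the helper name is ours, not the library's) of
# the QR re-parameterization that `normal_qr_transform` is expected to
# produce: with X = Q R, the linear predictor X.dot(beta) equals
# Q.dot(beta_tilde) for beta_tilde = R.dot(beta), so the original
# coefficients are recovered as inv(R).dot(beta_tilde), the quantity checked
# against `Coef_new_value` above.
def _qr_reparam_sketch():
    rng = np.random.RandomState(9283)
    X = rng.normal(10, 1, size=(10, 3))
    beta = rng.normal(size=3)
    Q, R = np.linalg.qr(X)
    beta_tilde = R.dot(beta)
    # The predictor is unchanged by the re-parameterization...
    np.testing.assert_array_almost_equal(X.dot(beta), Q.dot(beta_tilde))
    # ...and the original coefficients are recovered via inv(R).
    np.testing.assert_array_almost_equal(beta,
                                         np.linalg.inv(R).dot(beta_tilde))
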
def test_convert_rv_to_dist_shape():
    # Make sure we use the `ShapeFeature` to get the shape info
    X_rv = NormalRV(np.r_[1, 2], 2.0, name="X_rv")
    fgraph = FunctionGraph(
        tt_inputs([X_rv]), [X_rv], features=[tt.opt.ShapeFeature()])

    with pm.Model():
        res = convert_rv_to_dist(fgraph.outputs[0].owner, None)

    assert isinstance(res.distribution, pm.Normal)
    assert np.array_equal(res.distribution.shape, np.r_[2])

def test_is_random_variable():
    X_rv = NormalRV(0, 1)
    res = is_random_variable(X_rv)
    assert res == (X_rv, X_rv)

    def scan_fn():
        Y_t = NormalRV(0, 1, name="Y_t")
        return Y_t

    Y_rv, scan_updates = theano.scan(
        fn=scan_fn,
        outputs_info=[{}],
        n_steps=10,
    )

    res = is_random_variable(Y_rv)
    assert res == (Y_rv, Y_rv.owner.op.outputs[0])

def test_Normal_ShapeFeature():
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 3
    sd_tt = tt.scalar("sd")
    sd_tt.tag.test_value = 1.0

    d_rv = NormalRV(tt.ones((M_tt, )), sd_tt, size=(2, M_tt))
    # Make sure the test value is computable.
    d_rv.tag.test_value

    fg = FunctionGraph(
        [i for i in tt_inputs([d_rv]) if not isinstance(i, tt.Constant)],
        [d_rv],
        clone=True,
        features=[tt.opt.ShapeFeature()],
    )
    s1, s2 = fg.shape_feature.shape_of[fg.memo[d_rv]]

    assert get_test_value(s1) == get_test_value(d_rv).shape[0]
    assert get_test_value(s2) == get_test_value(d_rv).shape[1]

def test_Normal_infer_shape():
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 3
    sd_tt = tt.scalar("sd")
    sd_tt.tag.test_value = 1.0

    test_params = [
        ([tt.as_tensor_variable(1.0), sd_tt], None),
        ([tt.as_tensor_variable(1.0), sd_tt], (M_tt, )),
        ([tt.as_tensor_variable(1.0), sd_tt], (2, M_tt)),
        ([tt.zeros((M_tt, )), sd_tt], None),
        ([tt.zeros((M_tt, )), sd_tt], (M_tt, )),
        ([tt.zeros((M_tt, )), sd_tt], (2, M_tt)),
        ([tt.zeros((M_tt, )), tt.ones((M_tt, ))], None),
        ([tt.zeros((M_tt, )), tt.ones((M_tt, ))], (2, M_tt)),
    ]
    for args, size in test_params:
        rv = NormalRV(*args, size=size)
        rv_shape = tuple(NormalRV._infer_shape(size or (), args, None))
        assert tuple(get_test_value(rv_shape)) == tuple(
            get_test_value(rv).shape)

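# A rough NumPy analogue (ours, not the library's) of the shape rule
# `NormalRV._infer_shape` is tested against: a draw has shape `size` when
# `size` is given, and the broadcast shape of the parameters otherwise.
def _np_normal_shape_sketch():
    def infer(mu, sd, size=None):
        if size is not None:
            return tuple(size)
        return np.broadcast(np.asarray(mu), np.asarray(sd)).shape

    assert infer(1.0, 1.0) == ()
    assert infer(np.zeros(3), 1.0) == (3,)
    assert infer(np.zeros(3), np.ones(3), size=(2, 3)) == (2, 3)
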
def test_scale_loc_transform():
    tt.config.compute_test_value = "ignore"

    rand_state = theano.shared(np.random.RandomState())
    mu_a = NormalRV(0.0, 100**2, name="mu_a", rng=rand_state)
    sigma_a = HalfCauchyRV(5, name="sigma_a", rng=rand_state)
    mu_b = NormalRV(0.0, 100**2, name="mu_b", rng=rand_state)
    sigma_b = HalfCauchyRV(5, name="sigma_b", rng=rand_state)
    county_idx = np.r_[1, 1, 2, 3]

    # We want the following for a, b:
    # N(m, S) -> m + N(0, 1) * S
    a = NormalRV(mu_a, sigma_a, size=(len(county_idx), ), name="a",
                 rng=rand_state)
    b = NormalRV(mu_b, sigma_b, size=(len(county_idx), ), name="b",
                 rng=rand_state)

    radon_est = a[county_idx] + b[county_idx] * 7
    eps = HalfCauchyRV(5, name="eps", rng=rand_state)
    radon_like = NormalRV(radon_est, eps, name="radon_like", rng=rand_state)

    radon_like_rv = observed(tt.as_tensor_variable(np.r_[1.0, 2.0, 3.0, 4.0]),
                             radon_like)

    q_lv = var()
    (expr_graph, ) = run(
        1, q_lv,
        non_obs_walko(partial(reduceo, scale_loc_transform), radon_like_rv,
                      q_lv))

    radon_like_rv_opt = expr_graph.reify()

    assert radon_like_rv_opt.owner.op == observed

    radon_like_opt = radon_like_rv_opt.owner.inputs[1]
    radon_est_opt = radon_like_opt.owner.inputs[0]

    # These should now be `tt.add(mu_*, ...)` outputs.
    a_opt = radon_est_opt.owner.inputs[0].owner.inputs[0]
    b_opt = radon_est_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]

    # Make sure `NormalRV` gets replaced with an addition.
    assert a_opt.owner.op == tt.add
    assert b_opt.owner.op == tt.add

    # Make sure the first term in the addition is the old `NormalRV` mean.
    mu_a_opt = a_opt.owner.inputs[0].owner.inputs[0]
    assert "mu_a" == mu_a_opt.name == mu_a.name
    mu_b_opt = b_opt.owner.inputs[0].owner.inputs[0]
    assert "mu_b" == mu_b_opt.name == mu_b.name

    # Make sure the second term in the addition is the standard `NormalRV`
    # times the old standard deviation.
    assert a_opt.owner.inputs[1].owner.op == tt.mul
    assert b_opt.owner.inputs[1].owner.op == tt.mul

    sigma_a_opt = a_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_a_opt.owner.op == sigma_a.owner.op
    sigma_b_opt = b_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_b_opt.owner.op == sigma_b.owner.op

    a_std_norm_opt = a_opt.owner.inputs[1].owner.inputs[1]
    assert a_std_norm_opt.owner.op == NormalRV
    assert a_std_norm_opt.owner.inputs[0].data == 0.0
    assert a_std_norm_opt.owner.inputs[1].data == 1.0
    b_std_norm_opt = b_opt.owner.inputs[1].owner.inputs[1]
    assert b_std_norm_opt.owner.op == NormalRV
    assert b_std_norm_opt.owner.inputs[0].data == 0.0
    assert b_std_norm_opt.owner.inputs[1].data == 1.0

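# The identity behind `scale_loc_transform`, checked numerically in plain
# NumPy (a minimal sketch, separate from the graph test above): if
# z ~ N(0, 1), then m + s * z has the same distribution as N(m, s).
def _loc_scale_identity_sketch():
    rng = np.random.RandomState(3284)
    m, s = 2.0, 3.0
    x = m + s * rng.normal(0.0, 1.0, size=100000)
    np.testing.assert_allclose(x.mean(), m, rtol=1e-1)
    np.testing.assert_allclose(x.std(), s, rtol=1e-1)
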
def test_normal_normal_regression():
    tt.config.compute_test_value = "ignore"
    theano.config.cxx = ""
    np.random.seed(9283)

    N = 10
    M = 3
    a_tt = tt.vector("a")
    R_tt = tt.vector("R")
    X_tt = tt.matrix("X")
    V_tt = tt.vector("V")

    a_tt.tag.test_value = np.random.normal(size=M)
    R_tt.tag.test_value = np.abs(np.random.normal(size=M))
    X = np.random.normal(10, 1, size=N)
    X = np.c_[np.ones(10), X, X * X]
    X_tt.tag.test_value = X
    V_tt.tag.test_value = np.ones(N)

    beta_rv = NormalRV(a_tt, R_tt, name="\\beta")

    E_y_rv = X_tt.dot(beta_rv)
    E_y_rv.name = "E_y"
    Y_rv = NormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
    y_tt.name = "y"
    y_obs_rv = observed(y_tt, Y_rv)
    y_obs_rv.name = "y_obs"

    #
    # Use the relation to identify/match `Y`, `X`, and `beta`.
    #
    y_args_tail_lv, b_args_tail_lv = var(), var()
    beta_lv = var()

    y_args_lv, y_lv, Y_lv, X_lv = var(), var(), var(), var()
    (res, ) = run(
        1,
        (beta_lv, y_args_tail_lv, b_args_tail_lv),
        applyo(mt.observed, y_args_lv, y_obs_rv),
        eq(y_args_lv, (y_lv, Y_lv)),
        normal_normal_regression(Y_lv, X_lv, beta_lv, y_args_tail_lv,
                                 b_args_tail_lv),
    )

    # TODO FIXME: This would work if non-op parameters (e.g. names) were
    # covered by `operator`/`car`.  See `TheanoMetaOperator`.
    assert res[0].eval_obj.obj == beta_rv
    assert res[0] == etuplize(beta_rv)
    assert res[1] == etuplize(Y_rv)[2:]
    assert res[2] == etuplize(beta_rv)[1:]

    #
    # Use the relation to produce `Y` from a given `X` and `beta`.
    #
    X_new_mt = mt(tt.eye(N, M))
    beta_new_mt = mt(NormalRV(0, 1, size=M))
    Y_args_cdr_mt = etuplize(Y_rv)[2:]
    Y_lv = var()
    (res, ) = run(
        1, Y_lv,
        normal_normal_regression(Y_lv, X_new_mt, beta_new_mt, Y_args_cdr_mt))
    Y_out_mt = res.eval_obj

    Y_new_mt = etuple(mt.NormalRV, mt.dot(X_new_mt, beta_new_mt)) \
        + Y_args_cdr_mt
    Y_new_mt = Y_new_mt.eval_obj

    assert Y_out_mt == Y_new_mt

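# For reference, the generative model that `normal_normal_regression`
# matches, written as a plain NumPy sketch (the helper is illustrative only,
# not part of the library): beta ~ N(a, R) and Y ~ N(X.dot(beta), V).
def _normal_regression_draw(a, R, X, V, rng=None):
    rng = rng or np.random.RandomState()
    beta = rng.normal(a, R)
    return rng.normal(X.dot(beta), V)
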
def test_notex_print():
    tt_normalrv_noname_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"))
    expected = textwrap.dedent(r"""
    b in R, \mu in R, \sigma in R
    a ~ N(\mu, \sigma**2) in R
    (b * a)
    """)
    assert tt_pprint(tt_normalrv_noname_expr) == expected.strip()

    # Make sure the constant shape is shown as values and not symbols.
    tt_normalrv_name_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"), size=[2, 1], name="X")
    expected = textwrap.dedent(r"""
    b in R, \mu in R, \sigma in R
    X ~ N(\mu, \sigma**2) in R**(2 x 1)
    (b * X)
    """)
    assert tt_pprint(tt_normalrv_name_expr) == expected.strip()

    tt_2_normalrv_noname_expr = tt.matrix("M") * NormalRV(
        tt.scalar("\\mu_2"), tt.scalar("\\sigma_2"))
    tt_2_normalrv_noname_expr *= tt.scalar("b") * NormalRV(
        tt_2_normalrv_noname_expr, tt.scalar("\\sigma")) + tt.scalar("c")
    expected = textwrap.dedent(r"""
    M in R**(N^M_0 x N^M_1), \mu_2 in R, \sigma_2 in R
    b in R, \sigma in R, c in R
    a ~ N(\mu_2, \sigma_2**2) in R, d ~ N((M * a), \sigma**2) in R**(N^d_0 x N^d_1)
    ((M * a) * ((b * d) + c))
    """)
    assert tt_pprint(tt_2_normalrv_noname_expr) == expected.strip()

    expected = textwrap.dedent(r"""
    b in Z, c in Z, M in R**(N^M_0 x N^M_1)
    M[b, c]
    """)
    # TODO: "c" should be "1".
    assert (tt_pprint(
        tt.matrix("M")[tt.iscalar("a"), tt.constant(1, dtype="int")]) ==
        expected.strip())

    expected = textwrap.dedent(r"""
    M in R**(N^M_0 x N^M_1)
    M[1]
    """)
    assert tt_pprint(tt.matrix("M")[1]) == expected.strip()

    expected = textwrap.dedent(r"""
    M in N**(N^M_0)
    M[2:4:0]
    """)
    assert tt_pprint(tt.vector("M", dtype="uint32")[0:4:2]) == expected.strip()

    norm_rv = NormalRV(tt.scalar("\\mu"), tt.scalar("\\sigma"))
    rv_obs = observed(tt.constant(1.0, dtype=norm_rv.dtype), norm_rv)
    expected = textwrap.dedent(r"""
    \mu in R, \sigma in R
    a ~ N(\mu, \sigma**2) in R
    a = 1.0
    """)
    assert tt_pprint(rv_obs) == expected.strip()

def test_tex_print():
    tt_normalrv_noname_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"))
    expected = textwrap.dedent(r"""
    \begin{equation}
    \begin{gathered}
    b \in \mathbb{R}, \,\mu \in \mathbb{R}, \,\sigma \in \mathbb{R} \\
    a \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\, \in \mathbb{R}
    \end{gathered} \\
    (b \odot a)
    \end{equation}
    """)
    assert tt_tprint(tt_normalrv_noname_expr) == expected.strip()

    tt_normalrv_name_expr = tt.scalar("b") * NormalRV(
        tt.scalar("\\mu"), tt.scalar("\\sigma"), size=[2, 1], name="X")
    expected = textwrap.dedent(r"""
    \begin{equation}
    \begin{gathered}
    b \in \mathbb{R}, \,\mu \in \mathbb{R}, \,\sigma \in \mathbb{R} \\
    X \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\, \in \mathbb{R}^{2 \times 1}
    \end{gathered} \\
    (b \odot X)
    \end{equation}
    """)
    assert tt_tprint(tt_normalrv_name_expr) == expected.strip()

    tt_2_normalrv_noname_expr = tt.matrix("M") * NormalRV(
        tt.scalar("\\mu_2"), tt.scalar("\\sigma_2"))
    tt_2_normalrv_noname_expr *= tt.scalar("b") * NormalRV(
        tt_2_normalrv_noname_expr, tt.scalar("\\sigma")) + tt.scalar("c")
    expected = textwrap.dedent(r"""
    \begin{equation}
    \begin{gathered}
    M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}} \\
    \mu_2 \in \mathbb{R}, \,\sigma_2 \in \mathbb{R} \\
    b \in \mathbb{R}, \,\sigma \in \mathbb{R}, \,c \in \mathbb{R} \\
    a \sim \operatorname{N}\left(\mu_2, {\sigma_2}^{2}\right)\, \in \mathbb{R} \\
    d \sim \operatorname{N}\left((M \odot a), {\sigma}^{2}\right)\, \in \mathbb{R}^{N^{d}_{0} \times N^{d}_{1}}
    \end{gathered} \\
    ((M \odot a) \odot ((b \odot d) + c))
    \end{equation}
    """)
    assert tt_tprint(tt_2_normalrv_noname_expr) == expected.strip()

    expected = textwrap.dedent(r"""
    \begin{equation}
    \begin{gathered}
    b \in \mathbb{Z}, \,c \in \mathbb{Z}, \,M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}}
    \end{gathered} \\
    M\left[b, \,c\right]
    \end{equation}
    """)
    # TODO: "c" should be "1".
    assert (tt_tprint(
        tt.matrix("M")[tt.iscalar("a"), tt.constant(1, dtype="int")]) ==
        expected.strip())

    expected = textwrap.dedent(r"""
    \begin{equation}
    \begin{gathered}
    M \in \mathbb{R}^{N^{M}_{0} \times N^{M}_{1}}
    \end{gathered} \\
    M\left[1\right]
    \end{equation}
    """)
    assert tt_tprint(tt.matrix("M")[1]) == expected.strip()

    expected = textwrap.dedent(r"""
    \begin{equation}
    \begin{gathered}
    M \in \mathbb{N}^{N^{M}_{0}}
    \end{gathered} \\
    M\left[2:4:0\right]
    \end{equation}
    """)
    assert tt_tprint(tt.vector("M", dtype="uint32")[0:4:2]) == expected.strip()

    norm_rv = NormalRV(tt.scalar("\\mu"), tt.scalar("\\sigma"))
    rv_obs = observed(tt.constant(1.0, dtype=norm_rv.dtype), norm_rv)
    expected = textwrap.dedent(r"""
    \begin{equation}
    \begin{gathered}
    \mu \in \mathbb{R}, \,\sigma \in \mathbb{R} \\
    a \sim \operatorname{N}\left(\mu, {\sigma}^{2}\right)\, \in \mathbb{R}
    \end{gathered} \\
    a = 1.0
    \end{equation}
    """)
    assert tt_tprint(rv_obs) == expected.strip()

def test_pymc3_convert_dists():
    """Just a basic check that all PyMC3 RVs will convert to and from Theano RVs."""
    with pm.Model() as model:
        norm_rv = pm.Normal("norm_rv", 0.0, 1.0, observed=1.0)
        mvnorm_rv = pm.MvNormal("mvnorm_rv", np.r_[0.0], np.c_[1.0],
                                shape=1, observed=np.r_[1.0])
        cauchy_rv = pm.Cauchy("cauchy_rv", 0.0, 1.0, observed=1.0)
        halfcauchy_rv = pm.HalfCauchy("halfcauchy_rv", 1.0, observed=1.0)
        uniform_rv = pm.Uniform("uniform_rv", observed=1.0)
        gamma_rv = pm.Gamma("gamma_rv", 1.0, 1.0, observed=1.0)
        invgamma_rv = pm.InverseGamma("invgamma_rv", 1.0, 1.0, observed=1.0)
        exp_rv = pm.Exponential("exp_rv", 1.0, observed=1.0)
        halfnormal_rv = pm.HalfNormal("halfnormal_rv", 1.0, observed=1.0)
        beta_rv = pm.Beta("beta_rv", 2.0, 2.0, observed=1.0)
        binomial_rv = pm.Binomial("binomial_rv", 10, 0.5, observed=5)
        dirichlet_rv = pm.Dirichlet("dirichlet_rv", np.r_[0.1, 0.1],
                                    observed=np.r_[0.1, 0.1])
        poisson_rv = pm.Poisson("poisson_rv", 10, observed=5)
        bernoulli_rv = pm.Bernoulli("bernoulli_rv", 0.5, observed=0)
        betabinomial_rv = pm.BetaBinomial("betabinomial_rv", 0.1, 0.1, 10,
                                          observed=5)
        categorical_rv = pm.Categorical("categorical_rv", np.r_[0.5, 0.5],
                                        observed=1)
        multinomial_rv = pm.Multinomial("multinomial_rv", 5, np.r_[0.5, 0.5],
                                        observed=np.r_[2])
        negbinomial_rv = pm.NegativeBinomial("negbinomial_rv", 10.2, 0.5,
                                             observed=5)

    # Convert to a Theano `FunctionGraph`
    fgraph = model_graph(model)

    rvs_by_name = {
        n.owner.inputs[1].name: n.owner.inputs[1] for n in fgraph.outputs
    }

    pymc_rv_names = {n.name for n in model.observed_RVs}
    assert all(
        isinstance(rvs_by_name[n].owner.op, RandomVariable)
        for n in pymc_rv_names)

    # Now, convert back to a PyMC3 model
    pymc_model = graph_model(fgraph)

    new_pymc_rv_names = {n.name for n in pymc_model.observed_RVs}
    assert pymc_rv_names == new_pymc_rv_names

    with pytest.raises(TypeError):
        graph_model(NormalRV(0, 1), generate_names=False)

    res = graph_model(NormalRV(0, 1), generate_names=True)
    assert res.vars[0].name == "normal_0"

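# A minimal round-trip sketch of the two conversion entry points exercised
# above (`model_graph` and `graph_model`), using a single-RV model; this is
# an illustration, not an additional test case.
def _roundtrip_sketch():
    with pm.Model() as small_model:
        pm.Normal("x", 0.0, 1.0, observed=1.0)
    back = graph_model(model_graph(small_model))
    assert {v.name for v in small_model.observed_RVs} == {
        v.name for v in back.observed_RVs
    }
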
def test_pymc_normals():
    tt.config.compute_test_value = 'ignore'

    rand_state = theano.shared(np.random.RandomState())
    mu_a = NormalRV(0., 100**2, name='mu_a', rng=rand_state)
    sigma_a = HalfCauchyRV(5, name='sigma_a', rng=rand_state)
    mu_b = NormalRV(0., 100**2, name='mu_b', rng=rand_state)
    sigma_b = HalfCauchyRV(5, name='sigma_b', rng=rand_state)
    county_idx = np.r_[1, 1, 2, 3]

    # We want the following for a, b:
    # N(m, S) -> m + N(0, 1) * S
    a = NormalRV(mu_a, sigma_a, size=(len(county_idx), ), name='a',
                 rng=rand_state)
    b = NormalRV(mu_b, sigma_b, size=(len(county_idx), ), name='b',
                 rng=rand_state)

    radon_est = a[county_idx] + b[county_idx] * 7
    eps = HalfCauchyRV(5, name='eps', rng=rand_state)
    radon_like = NormalRV(radon_est, eps, name='radon_like', rng=rand_state)

    radon_like_rv = observed(tt.as_tensor_variable(np.r_[1., 2., 3., 4.]),
                             radon_like)

    graph_mt = mt(radon_like_rv)
    expr_graph, = run(
        1, var('q'),
        non_obs_fixedp_graph_applyo(scale_loc_transform, graph_mt, var('q')))

    radon_like_rv_opt = expr_graph.reify()

    assert radon_like_rv_opt.owner.op == observed

    radon_like_opt = radon_like_rv_opt.owner.inputs[1]
    radon_est_opt = radon_like_opt.owner.inputs[0]

    # These should now be `tt.add(mu_*, ...)` outputs.
    a_opt = radon_est_opt.owner.inputs[0].owner.inputs[0]
    b_opt = radon_est_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]

    # Make sure `NormalRV` gets replaced with an addition.
    assert a_opt.owner.op == tt.add
    assert b_opt.owner.op == tt.add

    # Make sure the first term in the addition is the old `NormalRV` mean.
    mu_a_opt = a_opt.owner.inputs[0].owner.inputs[0]
    assert 'mu_a' == mu_a_opt.name == mu_a.name
    mu_b_opt = b_opt.owner.inputs[0].owner.inputs[0]
    assert 'mu_b' == mu_b_opt.name == mu_b.name

    # Make sure the second term in the addition is the standard `NormalRV`
    # times the old standard deviation.
    assert a_opt.owner.inputs[1].owner.op == tt.mul
    assert b_opt.owner.inputs[1].owner.op == tt.mul

    sigma_a_opt = a_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_a_opt.owner.op == sigma_a.owner.op
    sigma_b_opt = b_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
    assert sigma_b_opt.owner.op == sigma_b.owner.op

    a_std_norm_opt = a_opt.owner.inputs[1].owner.inputs[1]
    assert a_std_norm_opt.owner.op == NormalRV
    assert a_std_norm_opt.owner.inputs[0].data == 0.0
    assert a_std_norm_opt.owner.inputs[1].data == 1.0
    b_std_norm_opt = b_opt.owner.inputs[1].owner.inputs[1]
    assert b_std_norm_opt.owner.op == NormalRV
    assert b_std_norm_opt.owner.inputs[0].data == 0.0
    assert b_std_norm_opt.owner.inputs[1].data == 1.0

def scan_fn(mus_t, sigma_t, S_tm1, Gamma_t, rng):
    S_t = CategoricalRV(Gamma_t[S_tm1], rng=rng, name="S_t")
    Y_t = NormalRV(mus_t[S_t], sigma_t, rng=rng, name="Y_t")
    return S_t, Y_t