elif method == 4:
    inf_method = approx_inf.GHEP(power=1, damping=0.5)  # Gauss-Hermite EP
elif method == 5:
    inf_method = approx_inf.GHKS(damping=0.5)  # Gauss-Hermite Kalman smoother

elif method == 6:
    inf_method = approx_inf.EP(power=0.01, intmethod='UT', damping=0.5)
elif method == 7:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=0.5)

elif method == 8:
    inf_method = approx_inf.VI(intmethod='UT', damping=0.5)
elif method == 9:
    inf_method = approx_inf.VI(intmethod='GH', damping=0.5)

model = SDEGP(prior=prior, likelihood=lik, t=X, y=Y, t_test=XT, y_test=YT, approx_inf=inf_method)

# warm-up runs: the first call triggers JIT compilation, and the repeated
# calls confirm the returned gradients are stable
neg_log_marg_lik, gradients = model.run()
print(gradients)
neg_log_marg_lik, gradients = model.run()
print(gradients)
neg_log_marg_lik, gradients = model.run()
print(gradients)

print('optimising the hyperparameters ...')
time_taken = np.zeros([10, 1])
for j in range(10):
    t0 = time.time()
    neg_log_marg_lik, gradients = model.run()
    print(gradients)
    t1 = time.time()
    time_taken[j] = t1 - t0  # record the wall-clock time of each run
# inf_method = approx_inf.ExpectationPropagation(power=0.9, intmethod='UT', damping=0.1)
# inf_method = approx_inf.ExpectationPropagation(power=0.1, intmethod='GH', damping=0.5)
# inf_method = approx_inf.VariationalInference(intmethod='GH', damping=0.5)
# inf_method = approx_inf.VariationalInference(intmethod='UT', damping=0.5)
# inf_method = approx_inf.ExtendedEP(power=0, damping=0.5)
# inf_method = approx_inf.ExtendedKalmanSmoother(damping=0.5)
# inf_method = approx_inf.GaussHermiteKalmanSmoother(damping=0.5)
inf_method = approx_inf.StatisticallyLinearisedEP(power=0.1,
                                                  intmethod='GH',
                                                  damping=0.5)
# inf_method = approx_inf.UnscentedKalmanSmoother(damping=0.5)

model = SDEGP(prior=prior,
              likelihood=lik,
              t=Xall,
              y=Yall,
              t_test=XT,
              approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=5e-2)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
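    # the excerpt is truncated here; a hedged sketch of how such a step is
    # typically completed, assuming the jax.experimental.optimizers API set up
    # above (opt_update returns the new optimiser state), not necessarily the
    # author's exact code:
    print('iter %2d: nlml=%1.2f' % (i, neg_log_marg_lik))
    return opt_update(i, gradients, state)


# drive the optimisation for a fixed number of steps (the count is illustrative)
num_iters = 250
for j in range(num_iters):
    opt_state = gradient_step(j, opt_state, model)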
Example 3
var_y = 0.1

theta_prior = jnp.array([var_f, len_f])
theta_lik = jnp.array([])

prior_ = priors.Matern52(theta_prior)
lik_ = likelihoods.SumOfGaussians(theta_lik)
approx_inf_ = EP(power=1.)
# approx_inf_ = PL()
# approx_inf_ = CL(power=0.5)
# approx_inf_ = IKS()
# approx_inf_ = EKEP()

sde_gp_model = SDEGP(prior=prior_,
                     likelihood=lik_,
                     t=x,
                     y=dummy_y,
                     t_test=x_test,
                     approx_inf=approx_inf_)

print('generating some data by sampling from the prior ...')
ground_truth = sde_gp_model.prior_sample(1, t=x)
y = sde_gp_model.likelihood.sample(ground_truth)[:, 0, 0]  # squeeze the sample down to a 1D vector

# plt.figure(1, figsize=(12, 5))
# plt.clf()
# plt.plot(x, ground_truth[:, 0, 0])
# plt.plot(x, y, '.')
# plt.show()

sde_gp_model = SDEGP(prior=prior_,
                     likelihood=lik_,
Example 4
elif method == 12:
    inf_method = approx_inf.EP(power=1, intmethod='GH', damping=0.05)
elif method == 13:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH', damping=0.05)
elif method == 14:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=0.05)

elif method == 15:
    inf_method = approx_inf.VI(intmethod='UT', damping=0.05)
elif method == 16:
    inf_method = approx_inf.VI(intmethod='GH', damping=0.05)

model = SDEGP(prior=prior,
              likelihood=lik,
              t=x_train,
              y=y_train,
              approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=5e-2)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    # neg_log_marg_lik, gradients = mod.run()
Example 5
elif method == 11:
    inf_method = approx_inf.EP(power=0.01, intmethod='UT', damping=damping)

elif method == 12:
    inf_method = approx_inf.EP(power=1, intmethod='GH', damping=damping)
elif method == 13:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH', damping=damping)
elif method == 14:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=damping)

elif method == 15:
    inf_method = approx_inf.VI(intmethod='UT', damping=damping)
elif method == 16:
    inf_method = approx_inf.VI(intmethod='GH', damping=damping)

model = SDEGP(prior=prior, likelihood=lik, t=x_train, y=y_train, approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=1e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    if not np.any(np.isnan(params[0])):  # skip the update if the new hyperparameters contain NaNs
        mod.prior.hyp = params[0]
        mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()
Example 6
# inf_method = approx_inf.ExpectationPropagation(power=0.9, intmethod='UT', damping=0.1)
inf_method = approx_inf.ExpectationPropagation(power=0.01,
                                               intmethod='GH',
                                               damping=0.5)
# inf_method = approx_inf.VariationalInference(intmethod='GH', damping=0.5)
# inf_method = approx_inf.VariationalInference(intmethod='UT', damping=0.5)
# inf_method = approx_inf.ExtendedEP(power=0, damping=0.5)
# inf_method = approx_inf.ExtendedKalmanSmoother(damping=0.5)
# inf_method = approx_inf.GaussHermiteKalmanSmoother(damping=0.5)
# inf_method = approx_inf.StatisticallyLinearisedEP(intmethod='UT', damping=0.5)
# inf_method = approx_inf.UnscentedKalmanSmoother(damping=0.5)

model = SDEGP(prior=prior,
              likelihood=lik,
              t=X,
              y=Y,
              t_test=XT,
              y_test=YT,
              approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=5e-2)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
Example 7
N = nr * nt  # number of data points

var_f = 1  # GP variance
len_f = 10  # lengthscale

prior = priors.SpatialMatern32(variance=var_f,
                               lengthscale=len_f,
                               z=r[0, ...],
                               fixed_grid=True)
lik = likelihoods.Poisson()  # count likelihood; a positive link (typically exp) maps f to the intensity
inf_method = approx_inf.ExtendedKalmanSmoother(damping=0.5)
# inf_method = approx_inf.ExtendedEP()

model = SDEGP(prior=prior,
              likelihood=lik,
              t=t,
              y=Y,
              r=r,
              approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=2e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod, plot_num_, mu_prev_):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
Example 8
var_f = 1.  # GP variance
len_f = 5.0  # GP lengthscale

prior = priors.Matern52(variance=var_f, lengthscale=len_f)

lik = likelihoods.Bernoulli(link='logit')  # p(y=1 | f) = 1 / (1 + exp(-f))
inf_method = approx_inf.ExpectationPropagation(power=0.9, intmethod='UT')
# inf_method = approx_inf.VariationalInference(intmethod='GH')
# inf_method = approx_inf.VariationalInference(intmethod='UT')
# inf_method = approx_inf.ExtendedEP(power=0)
# inf_method = approx_inf.ExtendedKalmanSmoother()
# inf_method = approx_inf.GaussHermiteKalmanSmoother()
# inf_method = approx_inf.StatisticallyLinearisedEP(intmethod='UT')
# inf_method = approx_inf.UnscentedKalmanSmoother()

model = SDEGP(prior=prior, likelihood=lik, t=x, y=y, approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=2e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()  # <-- less elegant but reduces compile time
Example 9
    inf_method = approx_inf.VI(intmethod='GH')

# plot_2d_classification(None, 0)

np.random.seed(99)
N = X.shape[0]  # number of training points

var_f = 1.  # GP variance
len_time = 1.  # temporal lengthscale
len_space = 1.  # spatial lengthscale

prior = priors.SpatioTemporalMatern52(variance=var_f, lengthscale_time=len_time, lengthscale_space=len_space)

lik = likelihoods.Bernoulli(link='logit')

model = SDEGP(prior=prior, likelihood=lik, t=X, y=Y, r=R, t_test=XT, y_test=YT, r_test=RT, approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=2e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod, plot_num_, mu_prev_):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()
Example 10
var_f = 0.3  # GP variance
len_time = 0.3  # temporal lengthscale
len_space = 0.3  # spatial lengthscale

prior = priors.SpatioTemporalMatern52(variance=var_f,
                                      lengthscale_time=len_time,
                                      lengthscale_space=len_space)
lik = likelihoods.Probit()  # p(y=1 | f) = Phi(f), the standard normal CDF
inf_method = approx_inf.ExpectationPropagation(power=0.5)
# inf_method = approx_inf.StatisticallyLinearisedEP()
# inf_method = approx_inf.ExtendedKalmanSmoother()
# inf_method = approx_inf.VariationalInference()

model = SDEGP(prior=prior,
              likelihood=lik,
              t=X,
              y=Y,
              r=R,
              approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=2e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod, plot_num_, mu_prev_):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
Example 11
prior = priors.SpatialMatern32(variance=var_f,
                               lengthscale=len_f,
                               z=r[0, ...],
                               fixed_grid=True)
lik = likelihoods.Poisson()
inf_method = approx_inf.ExtendedKalmanSmoother(damping=1.)
# inf_method = approx_inf.ExtendedEP()

# t_spacetime = np.block([t[..., 0][..., None], r])

model = SDEGP(prior=prior,
              likelihood=lik,
              t=t,
              y=Y,
              r=r,
              t_test=t,
              y_test=Y,
              r_test=r,
              approx_inf=inf_method)

neg_log_marg_lik, gradients = model.run_two_stage()
print(gradients)
neg_log_marg_lik, gradients = model.run_two_stage()
print(gradients)
neg_log_marg_lik, gradients = model.run_two_stage()
print(gradients)

print('optimising the hyperparameters ...')
time_taken = np.zeros([10, 1])
for j in range(10):
Example 12
y_ = f + np.sqrt(0.05) * np.random.randn(x.shape[0])  # noisy observations of the latent function
y = np.maximum(np.sign(y_), 0.)  # threshold to binary {0, 1} class labels

x_test = np.linspace(np.min(x) - 10.0, np.max(x) + 10.0, num=500)

var_f = 1.0  # GP variance
len_f = 5.0  # GP lengthscale

prior = priors.Matern52(variance=var_f, lengthscale=len_f)
lik = likelihoods.Probit()
approx_inf_1 = approx_inf.EP()
approx_inf_2 = approx_inf.VI()

model_1 = SDEGP(prior=prior,
                likelihood=lik,
                t=x,
                y=y,
                t_test=x_test,
                approx_inf=approx_inf_1)
model_2 = SDEGP(prior=prior,
                likelihood=lik,
                t=x,
                y=y,
                t_test=x_test,
                approx_inf=approx_inf_2)
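
# once trained, the two approximations can be compared at the test inputs; a
# hedged sketch, assuming SDEGP exposes a predict() method whose first two
# returns are the posterior mean and variance (the signature is an assumption):
# posterior_mean_1, posterior_var_1 = model_1.predict()[:2]
# posterior_mean_2, posterior_var_2 = model_2.predict()[:2]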

opt_init, opt_update, get_params = optimizers.adam(step_size=5e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model_1.prior.hyp, model_1.likelihood.hyp])


def gradient_step(i, state, mod):
Example 13
meanval = np.log(len(disaster_timings) /
                 num_time_bins)  # TODO: incorporate mean

var_f = 1.0  # GP variance
len_f = 1.0  # GP lengthscale

prior = priors.Matern52(variance=var_f, lengthscale=len_f)
lik = likelihoods.Poisson()
# inf_method = approx_inf.EP(power=0.5)
# inf_method = approx_inf.SLEP()
inf_method = approx_inf.EKS()
# inf_method = approx_inf.EEP()
# inf_method = approx_inf.VI()

model = SDEGP(prior=prior, likelihood=lik, t=x, y=y, approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=1e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()  # <-- less elegant but reduces compile time
Example 14
    inf_method = approx_inf.GHKS()

elif method == 6:
    inf_method = approx_inf.EP(power=0.01, intmethod='UT')
elif method == 7:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH')

elif method == 8:
    inf_method = approx_inf.VI(intmethod='UT')
elif method == 9:
    inf_method = approx_inf.VI(intmethod='GH')

model = SDEGP(prior=prior,
              likelihood=lik,
              t=x_train,
              y=y_train,
              t_test=x_test,
              y_test=y_test,
              approx_inf=inf_method)

neg_log_marg_lik, gradients = model.run()
print(gradients)
neg_log_marg_lik, gradients = model.run()
print(gradients)
neg_log_marg_lik, gradients = model.run()
print(gradients)

print('optimising the hyperparameters ...')
time_taken = np.zeros([10, 1])
for j in range(10):
    t0 = time.time()
Example 15
len_time = 0.3  # temporal lengthscale
len_space = 0.3  # spatial lengthscale

prior = priors.SpatioTemporalMatern52(variance=var_f,
                                      lengthscale_time=len_time,
                                      lengthscale_space=len_space)
lik = likelihoods.Probit()
inf_method = approx_inf.ExpectationPropagation(power=0.5)
# inf_method = approx_inf.StatisticallyLinearisedEP()
# inf_method = approx_inf.ExtendedKalmanSmoother()
# inf_method = approx_inf.VariationalInference()

model = SDEGP(prior=prior,
              likelihood=lik,
              t=X,
              y=Y,
              r=R,
              t_test=Xtest,
              r_test=Rtest,
              approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=2e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod, plot_num_, mu_prev_):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
Example 16
    inf_method = approx_inf.VI(intmethod='GH')

# plot_2d_classification(None, 0)

np.random.seed(99)
N = X.shape[0]  # number of training points

var_f = 1.  # GP variance
len_time = 1.  # temporal lengthscale
len_space = 1.  # spatial lengthscale

prior = priors.SpatioTemporalMatern52(variance=var_f, lengthscale_time=len_time, lengthscale_space=len_space)

lik = likelihoods.Bernoulli(link='logit')

model = SDEGP(prior=prior, likelihood=lik, t=X, y=Y, r=R, approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=2e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod, plot_num_, mu_prev_):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()
Example 17
elif method == 11:
    inf_method = approx_inf.EP(power=0.01, intmethod='UT')

elif method == 12:
    inf_method = approx_inf.EP(power=1, intmethod='GH')
elif method == 13:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH')
elif method == 14:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH')

elif method == 15:
    inf_method = approx_inf.VI(intmethod='UT')
elif method == 16:
    inf_method = approx_inf.VI(intmethod='GH')

model = SDEGP(prior=prior, likelihood=lik, t=x_train, y=y_train, t_test=x_test, y_test=y_test, approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=2e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()
Example 18
if method == 0:
    inf_method = approx_inf.EKS(damping=.5)  # extended Kalman smoother
elif method == 1:
    inf_method = approx_inf.UKS(damping=.5)  # unscented Kalman smoother
elif method == 2:
    inf_method = approx_inf.GHKS(damping=.5)  # Gauss-Hermite Kalman smoother
elif method == 3:
    inf_method = approx_inf.EP(power=1, intmethod='GH', damping=.5)
elif method == 4:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH', damping=.5)
elif method == 5:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=.5)
elif method == 6:
    inf_method = approx_inf.VI(intmethod='GH', damping=.5)

model = SDEGP(prior=prior, likelihood=lik, t=x_train, y=y_train, t_test=x_test, y_test=y_test,
              approx_inf=inf_method, z=z)
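
# z above holds the inducing inputs for this sparse variant; a minimal way to
# construct such a grid (M, the number of inducing points, is a placeholder):
# z = np.linspace(np.min(x_train), np.max(x_train), num=M)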

opt_init, opt_update, get_params = optimizers.adam(step_size=1e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # draw a random mini-batch of the N data points
    batch_ind = np.random.permutation(N)[:N_batch]

    # grad(Filter) + Smoother:
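    # the excerpt is truncated here; presumably the mini-batch indices are then
    # passed to the runner so only a subset of sites is updated per step. the
    # keyword below is an assumption, not a confirmed API:
    # neg_log_marg_lik, gradients = mod.run(batch_ind=batch_ind)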
Example 19
var_f = 1.0  # GP variance
len_f = 5.0  # GP lengthscale
var_y = 0.5  # observation noise

prior = priors.Matern52(variance=var_f, lengthscale=len_f)
# prior_ = priors.QuasiPeriodicMatern32([var_f, len_f, 20., 50.])
lik = likelihoods.Gaussian(variance=var_y)
inf_method = approx_inf.EP(power=0.5)
# inf_method = approx_inf.EKS()
# inf_method = approx_inf.EEP()
# inf_method = approx_inf.VI()

model = SDEGP(prior=prior,
              likelihood=lik,
              t=x,
              y=y,
              t_test=x_test,
              y_test=y_test,
              approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=5e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
Example 20
elif method == 12:
    if fold in [3, 4, 6, 7, 9]:
        inf_method = approx_inf.EP(power=1, intmethod='GH', damping=0.1)
    else:
        inf_method = approx_inf.EP(power=1, intmethod='GH', damping=0.5)
elif method == 13:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH', damping=0.5)
elif method == 14:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=0.5)

elif method == 15:
    inf_method = approx_inf.VI(intmethod='UT', damping=0.5)
elif method == 16:
    inf_method = approx_inf.VI(intmethod='GH', damping=0.5)

model = SDEGP(prior=prior, likelihood=lik, t=X, y=Y, approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=step_size)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()
Example 21
var_f = 1  # GP variance
len_f = 10  # lengthscale

prior = priors.SpatialMatern32(variance=var_f,
                               lengthscale=len_f,
                               z=r[0, ...],
                               fixed_grid=True)
lik = likelihoods.Poisson()
inf_method = approx_inf.ExtendedKalmanSmoother(damping=0.5)
# inf_method = approx_inf.ExtendedEP()

model = SDEGP(prior=prior,
              likelihood=lik,
              t=t,
              y=Y,
              r=r,
              t_test=t,
              y_test=Y,
              r_test=r,
              approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=2e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod, plot_num_, mu_prev_):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]