Example #1
# intmethod selects the quadrature: 'UT' = unscented transform,
# 'GH' = Gauss-Hermite; damping tempers the EP/VI updates.
elif method == 10:
    inf_method = approx_inf.EP(power=0.5, intmethod='UT', damping=0.5)
elif method == 11:
    inf_method = approx_inf.EP(power=0.01, intmethod='UT', damping=0.5)

elif method == 12:
    if fold in [3, 4, 6, 7, 9]:
        inf_method = approx_inf.EP(power=1, intmethod='GH', damping=0.1)
    else:
        inf_method = approx_inf.EP(power=1, intmethod='GH', damping=0.5)
elif method == 13:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH', damping=0.5)
elif method == 14:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=0.5)

elif method == 15:
    inf_method = approx_inf.VI(intmethod='UT', damping=0.5)
elif method == 16:
    inf_method = approx_inf.VI(intmethod='GH', damping=0.5)

model = SDEGP(prior=prior, likelihood=lik, t=X, y=Y, approx_inf=inf_method)

opt_init, opt_update, get_params = optimizers.adam(step_size=step_size)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]
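
    # Hedged completion sketch, assuming (as in the kalman-jax demo scripts)
    # that mod.run() performs the filter/smoother pass and returns the
    # negative log marginal likelihood and its gradients w.r.t. the
    # hyperparameters:
    neg_log_marg_lik, gradients = mod.run()
    return opt_update(i, gradients, state)


# Usage sketch (assumed): a plain Adam loop over gradient_step; the
# iteration count here is illustrative only.
for j in range(250):
    opt_state = gradient_step(j, opt_state, model)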
Example #2
elif method == 9:
    inf_method = approx_inf.EP(power=1, intmethod='UT')
elif method == 10:
    inf_method = approx_inf.EP(power=0.5, intmethod='UT')
elif method == 11:
    inf_method = approx_inf.EP(power=0.01, intmethod='UT')

elif method == 12:
    inf_method = approx_inf.EP(power=1, intmethod='GH')
elif method == 13:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH')
elif method == 14:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH')

elif method == 15:
    inf_method = approx_inf.VI(intmethod='UT')
elif method == 16:
    inf_method = approx_inf.VI(intmethod='GH')

# plot_2d_classification(None, 0)

np.random.seed(99)
N = X.shape[0]  # number of training points

var_f = 1.  # GP variance
len_time = 1.  # temporal lengthscale
len_space = 1.  # spatial lengthscale

prior = priors.SpatioTemporalMatern52(variance=var_f, lengthscale_time=len_time, lengthscale_space=len_space)

lik = likelihoods.Bernoulli(link='logit')
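
# Hedged sketch of the presumable continuation, mirroring the model
# construction and optimiser setup used in the other examples; X and Y are
# assumed to hold the spatio-temporal training inputs and binary targets
# defined earlier in the original script, and the step size is illustrative.
model = SDEGP(prior=prior, likelihood=lik, t=X, y=Y, approx_inf=inf_method)
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-1)
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])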
Example #3
lik = likelihoods.Poisson()

if method == 0:
    inf_method = approx_inf.EKS(damping=.5)   # extended Kalman smoother
elif method == 1:
    inf_method = approx_inf.UKS(damping=.5)   # unscented Kalman smoother
elif method == 2:
    inf_method = approx_inf.GHKS(damping=.5)  # Gauss-Hermite Kalman smoother
elif method == 3:
    inf_method = approx_inf.EP(power=1, intmethod='GH', damping=.5)     # standard EP
elif method == 4:
    inf_method = approx_inf.EP(power=0.5, intmethod='GH', damping=.5)   # power EP
elif method == 5:
    inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=.5)  # power EP, power -> 0
elif method == 6:
    inf_method = approx_inf.VI(intmethod='GH', damping=.5)              # variational inference

model = SDEGP(prior=prior, likelihood=lik, t=x_train, y=y_train, t_test=x_test, y_test=y_test,
              approx_inf=inf_method, z=z)

opt_init, opt_update, get_params = optimizers.adam(step_size=1e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])


def gradient_step(i, state, mod):
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]

    # grad(Filter) + Smoother:
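    # Hedged completion, assuming (as in the kalman-jax demos) that mod.run()
    # differentiates the filtering pass and then runs the smoother, returning
    # the negative log marginal likelihood together with its gradients:
    neg_log_marg_lik, gradients = mod.run()
    return opt_update(i, gradients, state)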
Example #4
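# Imports assumed from the kalman-jax project layout (hedged; the module
# names follow the library's demo scripts and may differ in your checkout):
import numpy as np
from jax.experimental import optimizers
import priors
import likelihoods
import approximate_inference as approx_inf
from sde_gp import SDEGP
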
np.random.seed(99)
N = 1000  # number of training points
x = 100 * np.random.rand(N)
f = 6 * np.sin(np.pi * x / 10.0) / (np.pi * x / 10.0 + 1)  # latent function
y_ = f + np.sqrt(0.05) * np.random.randn(x.shape[0])  # add Gaussian noise
y = np.maximum(np.sign(y_), 0.)  # binarise to {0, 1} class labels

x_test = np.linspace(np.min(x) - 10.0, np.max(x) + 10.0, num=500)

var_f = 1.0  # GP variance
len_f = 5.0  # GP lengthscale

prior = priors.Matern52(variance=var_f, lengthscale=len_f)
lik = likelihoods.Probit()
approx_inf_1 = approx_inf.EP()
approx_inf_2 = approx_inf.VI()

model_1 = SDEGP(prior=prior,
                likelihood=lik,
                t=x,
                y=y,
                t_test=x_test,
                approx_inf=approx_inf_1)
model_2 = SDEGP(prior=prior,
                likelihood=lik,
                t=x,
                y=y,
                t_test=x_test,
                approx_inf=approx_inf_2)

opt_init, opt_update, get_params = optimizers.adam(step_size=5e-1)
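
# Hedged continuation sketch: one optimiser state per model (opt_init is a
# pure function, so it can be reused); training would then follow the
# gradient_step pattern shown in the earlier examples for each model.
opt_state_1 = opt_init([model_1.prior.hyp, model_1.likelihood.hyp])
opt_state_2 = opt_init([model_2.prior.hyp, model_2.likelihood.hyp])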