Example #1
0
# Assemble the MCMC log-likelihood model settings once, then build and fit it.
# chain_length / burnin_steps / n_hypers tune the hyperparameter MCMC sampler;
# `noisy` and the data/prior objects come from the surrounding script.
_gp_mcmc_settings = dict(
    historical_data=init_data,
    derivatives=derivatives,
    prior=prior,
    chain_length=1000,
    burnin_steps=2000,
    n_hypers=2 ** 4,
    noisy=noisy,
)
cpp_gp_loglikelihood = cppGaussianProcessLogLikelihoodMCMC(**_gp_mcmc_settings)
cpp_gp_loglikelihood.train()

# Gradient-descent settings for the Python-side optimizer (posterior sampling):
# loose tolerance target (1e-10), small max relative step (2%), averaging over
# the last 15 steps.
py_sgd_params_ps = pyGradientDescentParameters(
    max_num_steps=1000, max_num_restarts=3, num_steps_averaged=15,
    gamma=0.7, pre_mult=1.0, max_relative_change=0.02, tolerance=1.0e-10)

# Gradient-descent settings for the C++-side optimizer (posterior sampling):
# a single multistart with few, short descent runs (6 steps, 1 restart).
cpp_sgd_params_ps = cppGradientDescentParameters(
    num_multistarts=1, max_num_steps=6, max_num_restarts=1,
    num_steps_averaged=3, gamma=0.0, pre_mult=1.0,
    max_relative_change=0.1, tolerance=1.0e-10)
Example #2
0
# noisy=True here: the observed objective values are treated as noisy
# (noisy=False would mean the underlying function being optimized is
# noise-free).
_loglik_kwargs = dict(
    historical_data=init_data,
    derivatives=derivatives,
    prior=prior,
    chain_length=1000,
    burnin_steps=2000,
    n_hypers=2**4,
    noisy=True,
)
cpp_gp_loglikelihood = cppGaussianProcessLogLikelihoodMCMC(**_loglik_kwargs)
cpp_gp_loglikelihood.train()

# Python-side gradient-descent settings (posterior sampling): 100 steps max,
# small initial step scale (pre_mult=0.01), tolerance 1e-8.
py_sgd_params_ps = pyGradientDescentParameters(
    max_num_steps=100,
    max_num_restarts=3,
    num_steps_averaged=15,
    gamma=0.7,
    pre_mult=0.01,
    max_relative_change=0.02,
    tolerance=1.0e-8,
)

# Python-side gradient-descent settings for optimizing the acquisition
# function: short runs (50 steps, 1 restart) with no step averaging.
py_sgd_params_acquisition = pyGradientDescentParameters(max_num_steps=50,
                                                        max_num_restarts=1,
                                                        num_steps_averaged=0,
                                                        gamma=0.7,
                                                        pre_mult=1.0,
                                                        max_relative_change=0.1,
                                                        tolerance=1.0e-8)

cpp_sgd_params_ps = cppGradientDescentParameters(num_multistarts=1,
                                                 max_num_steps=6,