def test_BO(ma2):
    """Evidence bookkeeping stays consistent across precomputed init and two infer rounds."""
    # Log transform of the distance usually smooths the distance surface.
    log_d = elfi.Operation(np.log, ma2['d'], name='log_d')

    def check_evidence(bo, total, initial):
        # The model and the method must agree on the total evidence count,
        # while the precomputed/initial counters stay frozen at the init size.
        assert bo.target_model.n_evidence == total
        assert bo.n_evidence == total
        assert bo.n_precomputed_evidence == initial
        assert bo.n_initial_evidence == initial

    n_initial = 20
    precomputed = elfi.Rejection(log_d, batch_size=5).sample(n_initial, quantile=1)
    bounds = {name: (-2, 2) for name in ma2.parameter_names}
    bo = elfi.BayesianOptimization(
        log_d, initial_evidence=precomputed.outputs, update_interval=10,
        batch_size=5, bounds=bounds)
    check_evidence(bo, n_initial, n_initial)

    extra_1 = 5
    bo.infer(n_initial + extra_1)
    check_evidence(bo, n_initial + extra_1, n_initial)

    extra_2 = 5
    bo.infer(n_initial + extra_1 + extra_2)
    check_evidence(bo, n_initial + extra_1 + extra_2, n_initial)

    # The GP must hold the precomputed evidence first, in the original order.
    assert np.array_equal(bo.target_model._gp.X[:n_initial, 0],
                          precomputed.samples_array[:, 0])
def test_bo(ma2):
    """Same seed must reproduce the optimum exactly; a fresh seed must not."""
    n_evidence = 16
    # Keyword arguments shared by all three optimizer constructions below.
    common = dict(
        initial_evidence=10,
        update_interval=1,
        batch_size=2,
        bounds={'t1': (-2, 2), 't2': (-1, 1)},
        acq_noise_var=.1,
    )

    bo = elfi.BayesianOptimization(ma2, 'd', **common)
    res = bo.infer(n_evidence=n_evidence)
    seed = bo.seed

    # Re-run with the recorded seed: results must match bit-for-bit.
    bo = elfi.BayesianOptimization(ma2, 'd', seed=seed, **common)
    res_same = bo.infer(n_evidence=n_evidence)

    # Re-run without fixing the seed: results must differ.
    bo = elfi.BayesianOptimization(ma2, 'd', **common)
    res_diff = bo.infer(n_evidence=n_evidence)

    check_consistent_sample(res, res_diff, res_same)
    assert not np.array_equal(res.x_min, res_diff.x_min)
    assert np.array_equal(res.x_min, res_same.x_min)
def test_async(ma2):
    """Smoke test: asynchronous batch acquisition runs without error."""
    bounds = {n: (-2, 2) for n in ma2.parameter_names}
    # BUG FIX: the original passed `async=True`, which is a SyntaxError on
    # Python 3.7+ (`async` became a reserved keyword). ELFI renamed the
    # parameter to `async_` for this reason.
    bo = elfi.BayesianOptimization(
        ma2, 'd', initial_evidence=0, update_interval=2, batch_size=2,
        bounds=bounds, async_=True)
    samples = 5
    bo.infer(samples)
def test_BO_works_with_zero_init_samples(ma2):
    """BayesianOptimization must start cleanly from an empty evidence set."""
    log_d = elfi.Operation(np.log, ma2['d'], name='log_d')
    param_bounds = {name: (-2, 2) for name in ma2.parameter_names}
    bo = elfi.BayesianOptimization(
        log_d, initial_evidence=0, update_interval=4, batch_size=2,
        bounds=param_bounds)

    # Nothing has been evaluated yet: every counter starts at zero.
    for counter in (bo.target_model.n_evidence, bo.n_evidence,
                    bo.n_precomputed_evidence, bo.n_initial_evidence):
        assert counter == 0

    n_samples = 4
    bo.infer(n_samples)

    # All acquired evidence came from the optimization itself; the
    # precomputed/initial counters remain zero.
    assert bo.target_model.n_evidence == n_samples
    assert bo.n_evidence == n_samples
    assert bo.n_precomputed_evidence == 0
    assert bo.n_initial_evidence == 0