def get_hartmann_metric(name="hartmann") -> Hartmann6Metric:
    """Build a Hartmann6 benchmark metric with a small fixed noise level.

    Args:
        name: Name to give the metric (defaults to "hartmann").

    Returns:
        A Hartmann6Metric over the six inputs x1..x6 with noise_sd=0.01.
    """
    # Hartmann6 is a 6-dimensional test function; its inputs are 1-indexed.
    names = ["x%d" % (dim + 1) for dim in range(6)]
    return Hartmann6Metric(name=name, param_names=names, noise_sd=0.01)
def get_hartmann_metric(name="hartmann") -> Hartmann6Metric:
    """Build a Hartmann6 benchmark metric fixture.

    Args:
        name: Name to give the metric. Optional with the previous default,
            so existing zero-argument callers are unaffected.

    Returns:
        A Hartmann6Metric over all six inputs x1..x6 with noise_sd=0.01.
    """
    # Hartmann6 is a 6-dimensional function, so the metric must reference
    # all six parameters; the previous hard-coded ["x1", "x2"] silently
    # dropped x3..x6. This also matches the sibling fixture that derives
    # the names programmatically.
    param_names = [f"x{idx + 1}" for idx in range(6)]
    return Hartmann6Metric(name=name, param_names=param_names, noise_sd=0.01)
### Hartmann6 problem, D=100 and D=1000
# The six relevant coordinates were chosen randomly via:
#   x = np.arange(100); np.random.seed(10); np.random.shuffle(x); print(x[:6])
#   -> [19 14 43 37 66 3]
hartmann6_100 = BenchmarkProblem(
    name="Hartmann6, D=100",
    optimal_value=-3.32237,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=Hartmann6Metric(
                name="objective",
                # Only these 6 of the 100 inputs affect the objective value.
                param_names=["x19", "x14", "x43", "x37", "x66", "x3"],
                noise_sd=0.0,
            ),
            minimize=True,
        )
    ),
    # All 100 parameters share the unit interval [0, 1].
    search_space=SearchSpace(
        parameters=[
            RangeParameter(
                name="x{}".format(dim),
                parameter_type=ParameterType.FLOAT,
                lower=0.0,
                upper=1.0,
            )
            for dim in range(100)
        ]
    ),
)
metric=NegativeBraninMetric( name="neg_branin", param_names=["x1", "x2"], noise_sd=5.0), minimize=False, )), search_space=get_branin_search_space(), ) # Hartmann 6 problems hartmann6 = BenchmarkProblem( name=hartmann6_function.name, fbest=hartmann6_function.fmin, optimization_config=OptimizationConfig(objective=Objective( metric=Hartmann6Metric( name=hartmann6_function.name, param_names=[f"x{i}" for i in range(6)], noise_sd=0.01, ), minimize=True, )), search_space=SearchSpace(parameters=[ RangeParameter( name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=param_domain[0], upper=param_domain[1], ) for i, param_domain in enumerate(hartmann6_function.domain) ]), ) hartmann6_constrained = BenchmarkProblem(
def test_REMBOStrategy(self, mock_fit_gpytorch_model, mock_optimize_acqf):
    """End-to-end check of REMBOStrategy arm bookkeeping and GP gating.

    Verifies that generated arms are segmented across the k random
    projections in round-robin order, that no GP is fit until every
    projection has accumulated init_per_proj (4) arms' worth of data,
    and that clone_reset() preserves the strategy dimensions.

    NOTE(review): mock_fit_gpytorch_model / mock_optimize_acqf are injected
    by patch decorators outside this view — confirm patch targets there.
    """
    # Construct a high-D test experiment with multiple metrics
    # 20-D search space; the Hartmann6 objective only reads x0..x5.
    hartmann_search_space = SearchSpace(
        parameters=[
            RangeParameter(
                name=f"x{i}",
                parameter_type=ParameterType.FLOAT,
                lower=0.0,
                upper=1.0,
            )
            for i in range(20)
        ]
    )
    exp = Experiment(
        name="test",
        search_space=hartmann_search_space,
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Hartmann6Metric(
                    name="hartmann6", param_names=[f"x{i}" for i in range(6)]
                ),
                minimize=True,
            ),
            # Second metric exercises multi-metric handling in the strategy.
            outcome_constraints=[
                OutcomeConstraint(
                    metric=L2NormMetric(
                        name="l2norm",
                        param_names=[f"x{i}" for i in range(6)],
                        noise_sd=0.2,
                    ),
                    op=ComparisonOp.LEQ,
                    bound=1.25,
                    relative=False,
                )
            ],
        ),
        runner=SyntheticRunner(),
    )
    # Instantiate the strategy
    # D=20 ambient dims, d=6 embedding dims, k=4 projections,
    # 4 initialization arms required per projection before a GP is used.
    gs = REMBOStrategy(D=20, d=6, k=4, init_per_proj=4)
    # Check that arms and data are correctly segmented by projection
    # First gen of n=2 goes entirely to projection 0.
    exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
    self.assertEqual(len(gs.arms_by_proj[0]), 2)
    self.assertEqual(len(gs.arms_by_proj[1]), 0)
    # Second gen moves on to projection 1 (round-robin).
    exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
    self.assertEqual(len(gs.arms_by_proj[0]), 2)
    self.assertEqual(len(gs.arms_by_proj[1]), 2)
    # Iterate until the first projection fits a GP
    for _ in range(4):
        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
    # Still initializing: 8 more arms spread over the 4 projections,
    # so no projection has crossed init_per_proj yet and no GP was fit.
    mock_fit_gpytorch_model.assert_not_called()
    self.assertEqual(len(gs.arms_by_proj[0]), 4)
    self.assertEqual(len(gs.arms_by_proj[1]), 4)
    self.assertEqual(len(gs.arms_by_proj[2]), 2)
    self.assertEqual(len(gs.arms_by_proj[3]), 2)
    # Keep iterating until GP is used for gen
    for i in range(4):
        # First two trials will go towards 3rd and 4th proj. getting enough
        # data for GP.
        if i < 1:
            self.assertLess(len(gs.arms_by_proj[2]), 4)
        if i < 2:
            self.assertLess(len(gs.arms_by_proj[3]), 4)
        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
        if i < 2:
            mock_fit_gpytorch_model.assert_not_called()
        else:
            # After all proj. have > 4 arms' worth of data, GP can be fit.
            self.assertFalse(any(len(x) < 4 for x in gs.arms_by_proj.values()))
            mock_fit_gpytorch_model.assert_called()
    # The strategy should record at least one init -> GP model transition.
    self.assertTrue(len(gs.model_transitions) > 0)
    # clone_reset() must preserve the configured dimensions.
    gs2 = gs.clone_reset()
    self.assertEqual(gs2.D, 20)
    self.assertEqual(gs2.d, 6)