def test_best_raw_objective_point_unsatisfiable_relative(self):
    exp = get_branin_experiment()

    # Optimization config with unsatisfiable constraint
    opt_conf = exp.optimization_config.clone()
    opt_conf.outcome_constraints.append(
        OutcomeConstraint(
            metric=get_branin_metric(),
            op=ComparisonOp.GEQ,
            bound=9999,
            relative=True,
        )
    )

    trial = exp.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    trial.mark_completed()

    with self.assertLogs(logger="ax.service.utils.best_point", level="WARN") as lg:
        get_best_raw_objective_point(exp, opt_conf)
        self.assertTrue(
            any("No status quo provided" in warning for warning in lg.output),
            msg=lg.output,
        )

    exp.status_quo = Arm(parameters={"x1": 0, "x2": 0}, name="status_quo")
    sq_trial = exp.new_trial(
        generator_run=GeneratorRun(arms=[exp.status_quo])
    ).run()
    sq_trial.mark_completed()

    with self.assertRaisesRegex(ValueError, "No points satisfied"):
        get_best_raw_objective_point(exp, opt_conf)

def test_split_by_arm(self):
    gm = {"hello": "world"}
    generator_run = GeneratorRun(
        arms=self.arms,
        weights=self.weights,
        optimization_config=get_optimization_config(),
        search_space=get_search_space(),
        gen_metadata=gm,
    )
    generator_runs = generator_run.split_by_arm()
    self.assertEqual(len(generator_runs), len(self.arms))
    for a, w, gr in zip(self.arms, self.weights, generator_runs):
        with self.subTest(a=a, w=w, gr=gr):
            # Make sure correct arms and weights appear in split
            # generator runs.
            self.assertEqual(gr.arms, [a])
            self.assertEqual(gr.weights, [w])
            self.assertEqual(
                gr._generator_run_type, generator_run._generator_run_type
            )
            self.assertEqual(gr._model_key, generator_run._model_key)
            self.assertEqual(
                gr._generation_step_index, generator_run._generation_step_index
            )
            self.assertIsNone(gr._optimization_config)
            self.assertIsNone(gr._search_space)
            self.assertIsNone(gr._gen_metadata)

def testInit(self):
    self.assertEqual(
        len(self.unweighted_run.optimization_config.outcome_constraints),
        len(self.optimization_config.outcome_constraints),
    )
    self.assertEqual(
        len(self.unweighted_run.search_space.parameters),
        len(self.search_space.parameters),
    )
    self.assertEqual(str(self.unweighted_run), GENERATOR_RUN_STR)
    self.assertIsNotNone(self.unweighted_run.time_created)
    self.assertEqual(self.unweighted_run.generator_run_type, None)
    self.assertEqual(self.unweighted_run.fit_time, 4.0)
    self.assertEqual(self.unweighted_run.gen_time, 10.0)
    with self.assertRaises(ValueError):
        GeneratorRun(
            arms=self.arms,
            weights=[],
            optimization_config=self.optimization_config,
            search_space=self.search_space,
        )
    with self.assertRaises(ValueError):
        GeneratorRun(arms=self.arms, model_kwargs={"a": 1})
    with self.assertRaises(ValueError):
        GeneratorRun(arms=self.arms, model_key="b", bridge_kwargs={"a": 1})

def generator_run_from_json(
    object_json: Dict[str, Any],
    decoder_registry: Dict[str, Type],
    class_decoder_registry: Dict[str, Callable[[Dict[str, Any]], Any]],
) -> GeneratorRun:
    """Load Ax GeneratorRun from JSON."""
    time_created_json = object_json.pop("time_created")
    type_json = object_json.pop("generator_run_type")
    index_json = object_json.pop("index")
    generator_run = GeneratorRun(
        **{
            k: object_from_json(
                v,
                decoder_registry=decoder_registry,
                class_decoder_registry=class_decoder_registry,
            )
            for k, v in object_json.items()
        }
    )
    generator_run._time_created = object_from_json(
        time_created_json,
        decoder_registry=decoder_registry,
        class_decoder_registry=class_decoder_registry,
    )
    generator_run._generator_run_type = object_from_json(
        type_json,
        decoder_registry=decoder_registry,
        class_decoder_registry=class_decoder_registry,
    )
    generator_run._index = object_from_json(
        index_json,
        decoder_registry=decoder_registry,
        class_decoder_registry=class_decoder_registry,
    )
    return generator_run

def add_generator_run(
    self, generator_run: GeneratorRun, multiplier: float = 1.0
) -> Trial:
    """Add a generator run to the trial.

    Note: since a trial includes only one arm, this will raise a ValueError
    if the generator run includes multiple arms.

    Returns:
        The trial instance.
    """
    # Copy the generator run, to preserve initial state and skip mutations to arms.
    generator_run = generator_run.clone()
    if len(generator_run.arms) > 1:
        raise ValueError(
            "Trial includes only one arm, but this generator run "
            "included multiple."
        )
    self.experiment.search_space.check_types(
        generator_run.arms[0].parameters, raise_error=True
    )
    self._check_existing_and_name_arm(generator_run.arms[0])
    self._generator_run = generator_run
    generator_run.index = 0
    self._set_generation_step_index(
        generation_step_index=generator_run._generation_step_index
    )
    return self

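# --- Usage sketch (not part of the library) of the single-arm constraint above.
# A minimal, hypothetical example assuming the Ax test stub
# `ax.utils.testing.core_stubs.get_experiment`; parameter values mirror the arms
# used in the surrounding tests.
from ax.core.arm import Arm
from ax.core.generator_run import GeneratorRun
from ax.utils.testing.core_stubs import get_experiment

experiment = get_experiment()
trial = experiment.new_trial()
# A single-arm generator run attaches cleanly.
trial.add_generator_run(
    GeneratorRun(arms=[Arm({"x": 1, "y": "foo", "z": True, "w": 4})])
)
# A multi-arm generator run is rejected, since a Trial holds exactly one arm.
try:
    experiment.new_trial().add_generator_run(
        GeneratorRun(
            arms=[
                Arm({"x": 1, "y": "foo", "z": True, "w": 4}),
                Arm({"x": 2, "y": "bar", "z": True, "w": 5}),
            ]
        )
    )
except ValueError:
    pass  # "Trial includes only one arm, but this generator run included multiple."
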
def generator_run_from_sqa(
    self, generator_run_sqa: SQAGeneratorRun
) -> GeneratorRun:
    """Convert SQLAlchemy GeneratorRun to Ax GeneratorRun."""
    arms = []
    weights = []
    opt_config = None
    search_space = None
    for arm_sqa in generator_run_sqa.arms:
        arms.append(self.arm_from_sqa(arm_sqa=arm_sqa))
        weights.append(arm_sqa.weight)
    opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
        metrics_sqa=generator_run_sqa.metrics
    )
    if len(tracking_metrics) > 0:
        raise SQADecodeError(  # pragma: no cover
            "GeneratorRun should not have tracking metrics."
        )
    search_space = self.search_space_from_sqa(
        parameters_sqa=generator_run_sqa.parameters,
        parameter_constraints_sqa=generator_run_sqa.parameter_constraints,
    )

    best_arm_predictions = None
    model_predictions = None
    if (
        generator_run_sqa.best_arm_parameters is not None
        and generator_run_sqa.best_arm_predictions is not None
    ):
        best_arm = Arm(
            name=generator_run_sqa.best_arm_name,
            parameters=generator_run_sqa.best_arm_parameters,
        )
        best_arm_predictions = (
            best_arm,
            tuple(generator_run_sqa.best_arm_predictions),
        )
    model_predictions = (
        tuple(generator_run_sqa.model_predictions)
        if generator_run_sqa.model_predictions is not None
        else None
    )

    generator_run = GeneratorRun(
        arms=arms,
        weights=weights,
        optimization_config=opt_config,
        search_space=search_space,
        fit_time=generator_run_sqa.fit_time,
        gen_time=generator_run_sqa.gen_time,
        best_arm_predictions=best_arm_predictions,
        model_predictions=model_predictions,
    )
    generator_run._time_created = generator_run_sqa.time_created
    generator_run._generator_run_type = self.get_enum_name(
        value=generator_run_sqa.generator_run_type,
        enum=self.config.generator_run_type_enum,
    )
    generator_run._index = generator_run_sqa.index
    return generator_run

def test_adding_new_trials(self):
    new_arm = get_arms()[1]
    new_trial = self.experiment.new_trial(
        generator_run=GeneratorRun(arms=[new_arm])
    )
    with self.assertRaises(ValueError):
        self.experiment.new_trial(generator_run=GeneratorRun(arms=get_arms()))
    self.assertEqual(new_trial.arms_by_name["1_0"], new_arm)
    with self.assertRaises(KeyError):
        self.trial.arms_by_name["1_0"]

def setUp(self) -> None:
    self.experiment = get_experiment()
    self.arm = Arm({"x": 1, "y": "foo", "z": True, "w": 4})
    self.trial = self.experiment.new_trial(GeneratorRun([self.arm]))
    self.experiment_2 = get_experiment()
    self.batch_trial = self.experiment_2.new_batch_trial(GeneratorRun([self.arm]))
    self.batch_trial.set_status_quo_with_weight(self.experiment_2.status_quo, 1)
    self.obs_feat = ObservationFeatures.from_arm(
        arm=self.trial.arm, trial_index=np.int64(self.trial.index)
    )

def generator_run_from_json(object_json: Dict[str, Any]) -> GeneratorRun:
    """Load Ax GeneratorRun from JSON."""
    time_created_json = object_json.pop("time_created")
    type_json = object_json.pop("generator_run_type")
    index_json = object_json.pop("index")
    generator_run = GeneratorRun(
        **{k: object_from_json(v) for k, v in object_json.items()}
    )
    generator_run._time_created = object_from_json(time_created_json)
    generator_run._generator_run_type = object_from_json(type_json)
    generator_run._index = object_from_json(index_json)
    return generator_run

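# --- Round-trip sketch (not part of the decoder): a hedged example assuming the
# JSON-store entry points `object_to_json` (encoder) and `object_from_json`
# (decoder, which dispatches GeneratorRun payloads to the function above), plus
# the `get_generator_run` test fixture.
from ax.storage.json_store.decoder import object_from_json
from ax.storage.json_store.encoder import object_to_json
from ax.utils.testing.core_stubs import get_generator_run

gr = get_generator_run()
restored = object_from_json(object_to_json(gr))
assert restored == gr  # GeneratorRun equality compares arms, weights, and metadata.
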
def test_adding_new_trials(self):
    new_arm = get_arms()[1]
    cand_metadata = {new_arm.signature: {"a": "b"}}
    new_trial = self.experiment.new_trial(
        generator_run=GeneratorRun(
            arms=[new_arm], candidate_metadata_by_arm_signature=cand_metadata
        )
    )
    with self.assertRaises(ValueError):
        self.experiment.new_trial(generator_run=GeneratorRun(arms=get_arms()))
    self.assertEqual(new_trial.arms_by_name["1_0"], new_arm)
    with self.assertRaises(KeyError):
        self.trial.arms_by_name["1_0"]
    self.assertEqual(
        new_trial._get_candidate_metadata_from_all_generator_runs(),
        {"1_0": cand_metadata[new_arm.signature]},
    )

def add_generator_run(
    self, generator_run: GeneratorRun, multiplier: float = 1.0
) -> BatchTrial:
    """Add a generator run to the trial.

    The arms and weights from the generator run will be merged with
    the existing arms and weights on the trial, and the generator run
    object will be linked to the trial for tracking.

    Args:
        generator_run: The generator run to be added.
        multiplier: The multiplier applied to input weights before merging
            with the current set of arms and weights.

    Returns:
        The trial instance.
    """
    # First validate generator run arms
    for arm in generator_run.arms:
        self.experiment.search_space.check_types(arm.parameters, raise_error=True)

    # Clone arms to avoid mutating existing state
    generator_run._arm_weight_table = OrderedDict(
        {
            arm_sig: ArmWeight(arm_weight.arm.clone(), arm_weight.weight)
            for arm_sig, arm_weight in generator_run._arm_weight_table.items()
        }
    )

    # Add names to arms.
    # For those not yet added to this experiment, create a new name.
    # Else, use the name of the existing arm.
    for arm in generator_run.arms:
        self._check_existing_and_name_arm(arm)

    self._generator_run_structs.append(
        GeneratorRunStruct(generator_run=generator_run, weight=multiplier)
    )
    generator_run.index = len(self._generator_run_structs) - 1

    if self.status_quo is not None and self.optimize_for_power:
        self.set_status_quo_and_optimize_power(status_quo=not_none(self.status_quo))

    self._set_generation_step_index(
        generation_step_index=generator_run._generation_step_index
    )
    self._refresh_arms_by_name()
    return self

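# --- Usage sketch (not part of the library) of the multiplier-based weight
# merging described in the docstring above. A hypothetical example assuming the
# Ax test stubs `get_experiment`, `get_arms`, and `get_weights`.
from ax.core.generator_run import GeneratorRun
from ax.utils.testing.core_stubs import get_arms, get_experiment, get_weights

experiment = get_experiment()
batch = experiment.new_batch_trial(
    generator_run=GeneratorRun(arms=get_arms(), weights=get_weights())
)
before = sum(batch.weights)

# Re-adding the first arm with weight 0.5 and multiplier 2.0 contributes
# 0.5 * 2.0 = 1.0 to that arm's total weight on the batch.
batch.add_generator_run(
    GeneratorRun(arms=get_arms()[:1], weights=[0.5]), multiplier=2.0
)
assert sum(batch.weights) == before + 1.0
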
def test_sobol_GPEI_strategy_batches(self):
    mock_GPEI_gen = self.mock_torch_model_bridge.return_value.gen
    mock_GPEI_gen.return_value = GeneratorRun(
        arms=[
            Arm(parameters={"x1": 1, "x2": 2}),
            Arm(parameters={"x1": 3, "x2": 4}),
        ]
    )
    exp = get_branin_experiment()
    sobol_GPEI_generation_strategy = GenerationStrategy(
        name="Sobol+GPEI",
        steps=[
            GenerationStep(model=Models.SOBOL, num_trials=1),
            GenerationStep(model=Models.GPEI, num_trials=6),
        ],
    )
    self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
    self.assertEqual(sobol_GPEI_generation_strategy.model_transitions, [1])
    gr = sobol_GPEI_generation_strategy.gen(exp, n=2)
    exp.new_batch_trial(generator_run=gr).run()
    for i in range(1, 8):
        if i == 7:
            # Check completeness error message.
            with self.assertRaises(GenerationStrategyCompleted):
                g = sobol_GPEI_generation_strategy.gen(exp, n=2)
        else:
            g = sobol_GPEI_generation_strategy.gen(exp, n=2)
            exp.new_batch_trial(generator_run=g).run()
    self.assertIsInstance(sobol_GPEI_generation_strategy.model, TorchModelBridge)

def setUp(self):
    self.gr = GeneratorRun(arms=[Arm(parameters={"x1": 1, "x2": 2})])
    # Mock out slow GPEI.
    self.torch_model_bridge_patcher = patch(
        f"{TorchModelBridge.__module__}.TorchModelBridge", spec=True
    )
    self.mock_torch_model_bridge = self.torch_model_bridge_patcher.start()
    self.mock_torch_model_bridge.return_value.gen.return_value = self.gr

    # Mock out slow TS.
    self.discrete_model_bridge_patcher = patch(
        f"{DiscreteModelBridge.__module__}.DiscreteModelBridge", spec=True
    )
    self.mock_discrete_model_bridge = self.discrete_model_bridge_patcher.start()
    self.mock_discrete_model_bridge.return_value.gen.return_value = self.gr

    # Mock in `Models` registry.
    self.registry_setup_dict_patcher = patch.dict(
        f"{Models.__module__}.MODEL_KEY_TO_MODEL_SETUP",
        {
            "Factorial": MODEL_KEY_TO_MODEL_SETUP["Factorial"]._replace(
                bridge_class=self.mock_discrete_model_bridge
            ),
            "Thompson": MODEL_KEY_TO_MODEL_SETUP["Thompson"]._replace(
                bridge_class=self.mock_discrete_model_bridge
            ),
            "GPEI": MODEL_KEY_TO_MODEL_SETUP["GPEI"]._replace(
                bridge_class=self.mock_torch_model_bridge
            ),
        },
    )
    self.mock_in_registry = self.registry_setup_dict_patcher.start()

    # Model bridges are mocked, which makes kwargs validation difficult,
    # so for now we skip it in the generation strategy tests.
    # NOTE: Starting with Python 3.8 this is not a problem, as `autospec=True`
    # ensures that the mocks have correct signatures, but in earlier versions
    # kwarg validation on mocks does not really work.
    self.step_model_kwargs = {"silently_filter_kwargs": True}
    self.hss_experiment = get_hierarchical_search_space_experiment()
    self.sobol_GPEI_GS = GenerationStrategy(
        name="Sobol+GPEI",
        steps=[
            GenerationStep(
                model=Models.SOBOL,
                num_trials=5,
                model_kwargs=self.step_model_kwargs,
            ),
            GenerationStep(
                model=Models.GPEI,
                num_trials=2,
                model_kwargs=self.step_model_kwargs,
            ),
        ],
    )
    self.sobol_GS = GenerationStrategy(
        steps=[
            GenerationStep(
                Models.SOBOL,
                num_trials=-1,
                should_deduplicate=True,
            )
        ]
    )

def test_fail_on_batch(self):
    ax_client = AxClient()
    ax_client.create_experiment(
        parameters=[
            {"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
            {"name": "y", "type": "range", "bounds": [0.0, 15.0]},
        ],
        minimize=True,
    )
    batch_trial = ax_client.experiment.new_batch_trial(
        generator_run=GeneratorRun(
            arms=[
                Arm(parameters={"x": 0, "y": 1}),
                Arm(parameters={"x": 0, "y": 1}),
            ]
        )
    )
    with self.assertRaises(NotImplementedError):
        ax_client.complete_trial(batch_trial.index, 0)

def add_generator_run(self, generator_run: GeneratorRun, multiplier: float = 1.0) -> "Trial": """Add a generator run to the trial. Note: since trial includes only one arm, this will raise a ValueError if the generator run includes multiple arms. Returns: The trial instance. """ if len(generator_run.arms) > 1: raise ValueError( "Trial includes only one arm, but this generator run " "included multiple.") self.experiment.search_space.check_types( generator_run.arms[0].parameters, raise_error=True) self._check_existing_and_name_arm(generator_run.arms[0]) self._generator_run = generator_run generator_run.index = 0 return self
def setUp(self):
    self.gr = GeneratorRun(arms=[Arm(parameters={"x1": 1, "x2": 2})])
    # Mock out slow GPEI.
    self.mock_torch_model_bridge = patch(
        f"{TorchModelBridge.__module__}.TorchModelBridge", spec=True
    ).start()
    self.mock_torch_model_bridge.return_value.gen.return_value = self.gr
    # Mock out slow TS.
    self.mock_discrete_model_bridge = patch(
        f"{DiscreteModelBridge.__module__}.DiscreteModelBridge", spec=True
    ).start()
    self.mock_discrete_model_bridge.return_value.gen.return_value = self.gr
    # Mock in `Models` registry.
    self.mock_in_registry = patch.dict(
        f"{Models.__module__}.MODEL_KEY_TO_MODEL_SETUP",
        {
            "Factorial": MODEL_KEY_TO_MODEL_SETUP["Factorial"]._replace(
                bridge_class=self.mock_discrete_model_bridge
            ),
            "Thompson": MODEL_KEY_TO_MODEL_SETUP["Thompson"]._replace(
                bridge_class=self.mock_discrete_model_bridge
            ),
            "GPEI": MODEL_KEY_TO_MODEL_SETUP["GPEI"]._replace(
                bridge_class=self.mock_torch_model_bridge
            ),
        },
    ).start()

def test_add_trial_same_arm(self):
    # Check that adding a new arm without a name works correctly.
    new_trial1 = self.experiment.new_trial(
        generator_run=GeneratorRun(arms=[self.arm.clone(clear_name=True)])
    )
    self.assertEqual(new_trial1.arm.name, self.trial.arm.name)
    self.assertFalse(new_trial1.arm is self.trial.arm)
    # Check that adding a new arm with a name works correctly.
    new_trial2 = self.experiment.new_trial(
        generator_run=GeneratorRun(arms=[self.arm.clone()])
    )
    self.assertEqual(new_trial2.arm.name, self.trial.arm.name)
    self.assertFalse(new_trial2.arm is self.trial.arm)
    arm_wrong_name = self.arm.clone(clear_name=True)
    arm_wrong_name.name = "wrong_name"
    with self.assertRaises(ValueError):
        new_trial2 = self.experiment.new_trial(
            generator_run=GeneratorRun(arms=[arm_wrong_name])
        )

def get_generator_run() -> GeneratorRun:
    arms = get_arms_from_dict(get_arm_weights1())
    weights = get_weights_from_dict(get_arm_weights1())
    optimization_config = get_optimization_config()
    search_space = get_search_space()
    arm_predictions = get_model_predictions_per_arm()
    return GeneratorRun(
        arms=arms,
        weights=weights,
        optimization_config=optimization_config,
        search_space=search_space,
        model_predictions=get_model_predictions(),
        best_arm_predictions=(arms[0], arm_predictions[arms[0].signature]),
        fit_time=10.0,
        gen_time=5.0,
        model_key="Sobol",
        model_kwargs={"scramble": False, "torch_device": torch.device("cpu")},
        bridge_kwargs={"transforms": Cont_X_trans, "torch_dtype": torch.double},
        generation_step_index=0,
        candidate_metadata_by_arm_signature={
            a.signature: {"md_key": f"md_val_{a.signature}"} for a in arms
        },
    )

def testAddGeneratorRun(self):
    self.assertEqual(len(self.batch.arms), len(self.arms))
    self.assertEqual(len(self.batch.generator_run_structs), 1)
    self.assertEqual(sum(self.batch.weights), sum(self.weights))

    # One of these arms already exists on the BatchTrial,
    # so we should just update its weight.
    new_arms = [
        Arm(parameters={"w": 0.75, "x": 1, "y": "foo", "z": True}),
        Arm(parameters={"w": 1.4, "x": 5, "y": "bar", "z": False}),
    ]
    new_weights = [0.75, 0.25]
    gr = GeneratorRun(arms=new_arms, weights=new_weights)
    self.batch.add_generator_run(gr, 2.0)

    self.assertEqual(len(self.batch.arms), len(self.arms) + 1)
    self.assertEqual(len(self.batch.generator_run_structs), 2)
    self.assertEqual(sum(self.batch.weights), sum(self.weights) + 2)

def testInitWithGeneratorRun(self):
    generator_run = GeneratorRun(arms=self.arms, weights=self.weights)
    batch = self.experiment.new_batch_trial(generator_run=generator_run)
    batch.add_arms_and_weights(arms=self.arms, weights=self.weights)
    self.assertEqual(self.batch.arms_by_name["0_0"], self.batch.arms[0])
    self.assertEqual(self.batch.arms_by_name["0_1"], self.batch.arms[1])
    self.assertEqual(len(batch.arms), len(self.arms))
    self.assertEqual(len(self.batch.generator_run_structs), 1)

def setUp(self) -> None:
    self.experiment = get_experiment()
    self.trial = self.experiment.new_trial(
        GeneratorRun([Arm({"x": 1, "y": "foo", "z": True, "w": 4})])
    )

def testAddGeneratorRunValidation(self):
    new_batch_trial = self.experiment.new_batch_trial()
    new_arms = [
        Arm(name="0_1", parameters={"w": 0.75, "x": 1, "y": "foo", "z": True}),
        Arm(name="0_2", parameters={"w": 0.75, "x": 1, "y": "foo", "z": True}),
    ]
    gr = GeneratorRun(arms=new_arms)
    with self.assertRaises(ValueError):
        new_batch_trial.add_generator_run(gr)

def testMergeDuplicateArm(self):
    arms = self.arms + [self.arms[0]]
    run = GeneratorRun(
        arms=arms,
        optimization_config=self.optimization_config,
        search_space=self.search_space,
        model_predictions=self.model_predictions,
    )
    self.assertEqual(str(run), GENERATOR_RUN_STR_PLUS_1)

def add_arm(self, arm: Arm) -> "Trial": """Add arm to the trial. Returns: The trial instance. """ return self.add_generator_run(generator_run=GeneratorRun( arms=[arm], type=GeneratorRunType.MANUAL.name))
def testGenMetadata(self):
    gm = {"hello": "world"}
    generator_run = GeneratorRun(
        arms=self.arms,
        weights=self.weights,
        optimization_config=get_optimization_config(),
        search_space=get_search_space(),
        gen_metadata=gm,
    )
    self.assertEqual(generator_run.gen_metadata, gm)

def testOptionalObjectiveName(self) -> None:
    experiment = SimpleExperiment(
        name="test_branin",
        search_space=get_branin_search_space(),
        evaluation_function=sum_evaluation_function_v2,
    )
    for i in range(len(self.arms)):
        experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
    self.assertFalse(experiment.eval().df.empty)

def testEvaluationFunctionV4Numpy(self) -> None:
    experiment = SimpleExperiment(
        name="test_branin",
        search_space=get_branin_search_space(),
        objective_name="sum",
        evaluation_function=sum_evaluation_function_v4_numpy,
    )
    for i in range(len(self.arms)):
        experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
    self.assertFalse(experiment.eval().df.empty)

def testBestArm(self):
    generator_run = GeneratorRun(
        arms=self.arms,
        weights=self.weights,
        optimization_config=get_optimization_config(),
        search_space=get_search_space(),
        best_arm_predictions=(self.arms[0], ({"a": 1.0}, {"a": {"a": 2.0}})),
    )
    self.assertEqual(
        generator_run.best_arm_predictions,
        (self.arms[0], ({"a": 1.0}, {"a": {"a": 2.0}})),
    )

def test_best_raw_objective_point(self):
    exp = get_branin_experiment()
    with self.assertRaisesRegex(ValueError, "Cannot identify best "):
        get_best_raw_objective_point(exp)
    self.assertEqual(get_best_parameters(exp), None)
    exp.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    opt_conf = exp.optimization_config.clone()
    opt_conf.objective.metric._name = "not_branin"
    with self.assertRaisesRegex(ValueError, "No data has been logged"):
        get_best_raw_objective_point(exp, opt_conf)

def testObservationsWithCandidateMetadata(self):
    SOME_METADATA_KEY = "metadatum"
    truth = [
        {
            "arm_name": "0_0",
            "parameters": {"x": 0, "y": "a"},
            "mean": 2.0,
            "sem": 2.0,
            "trial_index": 0,
            "metric_name": "a",
        },
        {
            "arm_name": "1_0",
            "parameters": {"x": 1, "y": "b"},
            "mean": 3.0,
            "sem": 3.0,
            "trial_index": 1,
            "metric_name": "a",
        },
    ]
    arms = {
        obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
        for obs in truth
    }
    experiment = Mock()
    experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
    trials = {
        obs["trial_index"]: Trial(
            experiment,
            GeneratorRun(
                arms=[arms[obs["arm_name"]]],
                candidate_metadata_by_arm_signature={
                    arms[obs["arm_name"]].signature: {
                        SOME_METADATA_KEY: f"value_{obs['trial_index']}"
                    }
                },
            ),
        )
        for obs in truth
    }
    type(experiment).arms_by_name = PropertyMock(return_value=arms)
    type(experiment).trials = PropertyMock(return_value=trials)

    df = pd.DataFrame(truth)[
        ["arm_name", "trial_index", "mean", "sem", "metric_name"]
    ]
    data = Data(df=df)
    observations = observations_from_data(experiment, data)
    for observation in observations:
        self.assertEqual(
            observation.features.metadata.get(SOME_METADATA_KEY),
            f"value_{observation.features.trial_index}",
        )