def get_branin_arms(n: int, seed: int) -> List[Arm]:
    # TODO: replace with Sobol
    np.random.seed(seed)
    x1_raw = np.random.rand(n)
    x2_raw = np.random.rand(n)
    return [
        Arm(parameters={"x1": -5 + x1_raw[i] * 15, "x2": x2_raw[i] * 15})
        for i in range(n)
    ]
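The TODO above suggests swapping the uniform draws for quasi-random Sobol points. Below is a minimal sketch of one way to do that with SciPy's qmc.Sobol engine; the helper name get_branin_arms_sobol is hypothetical, and only the Branin bounds (x1 in [-5, 10], x2 in [0, 15]) are reused from the function above.

# Hedged sketch, not the library's implementation: draw n quasi-random points
# in the unit square with SciPy's Sobol engine, then rescale to Branin bounds.
from typing import List

from ax.core.arm import Arm
from scipy.stats import qmc


def get_branin_arms_sobol(n: int, seed: int) -> List[Arm]:
    sampler = qmc.Sobol(d=2, seed=seed)
    points = sampler.random(n)  # shape (n, 2), values in [0, 1)
    return [
        Arm(parameters={"x1": -5 + x1 * 15, "x2": x2 * 15})
        for x1, x2 in points
    ]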
def get_arm_weights1() -> MutableMapping[Arm, float]:
    parameters_dicts: List[TParameterization] = [
        {"w": 0.85, "x": 1, "y": "baz", "z": False},
        {"w": 0.75, "x": 1, "y": "foo", "z": True},
        {"w": 1.4, "x": 2, "y": "bar", "z": True},
    ]
    arms = [Arm(param_dict) for param_dict in parameters_dicts]
    weights = [0.25, 0.5, 0.25]
    return OrderedDict(zip(arms, weights))
def get_arm_weights2() -> MutableMapping[Arm, float]:  # update
    parameters_dicts: List[TParameterization] = [
        {"w": 0.96, "x": 3, "y": "hello", "z": True},
        {"w": 0.16, "x": 4, "y": "dear", "z": True},
        {"w": 3.1, "x": 5, "y": "world", "z": False},
    ]
    arms = [Arm(param_dict) for param_dict in parameters_dicts]
    weights = [0.25, 0.5, 0.25]
    return OrderedDict(zip(arms, weights))
def generator_run_from_sqa(
    self, generator_run_sqa: SQAGeneratorRun
) -> GeneratorRun:
    """Convert SQLAlchemy GeneratorRun to Ax GeneratorRun."""
    arms = []
    weights = []
    opt_config = None
    search_space = None
    for arm_sqa in generator_run_sqa.arms:
        arms.append(self.arm_from_sqa(arm_sqa=arm_sqa))
        weights.append(arm_sqa.weight)
    opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
        metrics_sqa=generator_run_sqa.metrics
    )
    if len(tracking_metrics) > 0:
        raise SQADecodeError(  # pragma: no cover
            "GeneratorRun should not have tracking metrics."
        )
    search_space = self.search_space_from_sqa(
        parameters_sqa=generator_run_sqa.parameters,
        parameter_constraints_sqa=generator_run_sqa.parameter_constraints,
    )
    best_arm_predictions = None
    model_predictions = None
    if (
        generator_run_sqa.best_arm_parameters is not None
        and generator_run_sqa.best_arm_predictions is not None
    ):
        best_arm = Arm(
            name=generator_run_sqa.best_arm_name,
            parameters=generator_run_sqa.best_arm_parameters,
        )
        best_arm_predictions = (
            best_arm,
            tuple(generator_run_sqa.best_arm_predictions),
        )
    model_predictions = (
        tuple(generator_run_sqa.model_predictions)
        if generator_run_sqa.model_predictions is not None
        else None
    )
    generator_run = GeneratorRun(
        arms=arms,
        weights=weights,
        optimization_config=opt_config,
        search_space=search_space,
        fit_time=generator_run_sqa.fit_time,
        gen_time=generator_run_sqa.gen_time,
        best_arm_predictions=best_arm_predictions,
        model_predictions=model_predictions,
    )
    generator_run._time_created = generator_run_sqa.time_created
    generator_run._generator_run_type = self.get_enum_name(
        value=generator_run_sqa.generator_run_type,
        enum=self.config.generator_run_type_enum,
    )
    generator_run._index = generator_run_sqa.index
    return generator_run
def _init_experiment_from_sqa(self, experiment_sqa: SQAExperiment) -> Experiment:
    """First step of conversion within experiment_from_sqa."""
    opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
        metrics_sqa=experiment_sqa.metrics
    )
    search_space = self.search_space_from_sqa(
        parameters_sqa=experiment_sqa.parameters,
        parameter_constraints_sqa=experiment_sqa.parameter_constraints,
    )
    if search_space is None:
        raise SQADecodeError(  # pragma: no cover
            "Experiment SearchSpace cannot be None."
        )
    status_quo = (
        Arm(
            # pyre-fixme[6]: Expected `Dict[str, Optional[Union[bool, float,
            #  int, str]]]` for 1st param but got `Optional[Dict[str,
            #  Optional[Union[bool, float, int, str]]]]`.
            parameters=experiment_sqa.status_quo_parameters,
            name=experiment_sqa.status_quo_name,
        )
        if experiment_sqa.status_quo_parameters is not None
        else None
    )
    if len(experiment_sqa.runners) == 0:
        runner = None
    elif len(experiment_sqa.runners) == 1:
        runner = self.runner_from_sqa(experiment_sqa.runners[0])
    else:
        raise ValueError(  # pragma: no cover
            "Multiple runners on experiment "
            "only supported for MultiTypeExperiment."
        )
    subclass = (experiment_sqa.properties or {}).get("subclass")
    if subclass == "SimpleExperiment":
        if opt_config is None:
            raise SQADecodeError(  # pragma: no cover
                "SimpleExperiment must have an optimization config."
            )
        experiment = SimpleExperiment(
            name=experiment_sqa.name,
            search_space=search_space,
            objective_name=opt_config.objective.metric.name,
            minimize=opt_config.objective.minimize,
            outcome_constraints=opt_config.outcome_constraints,
            status_quo=status_quo,
        )
        experiment.description = experiment_sqa.description
        experiment.is_test = experiment_sqa.is_test
    else:
        experiment = Experiment(
            name=experiment_sqa.name,
            description=experiment_sqa.description,
            search_space=search_space,
            optimization_config=opt_config,
            tracking_metrics=tracking_metrics,
            runner=runner,
            status_quo=status_quo,
            is_test=experiment_sqa.is_test,
        )
    return experiment
def setUp(self) -> None:
    self.experiment = get_experiment()
    self.arm = Arm({"x": 1, "y": "foo", "z": True, "w": 4})
    self.trial = self.experiment.new_trial(GeneratorRun([self.arm]))
    self.experiment_2 = get_experiment()
    self.batch_trial = self.experiment_2.new_batch_trial(GeneratorRun([self.arm]))
    self.batch_trial.set_status_quo_with_weight(self.experiment_2.status_quo, 1)
    self.obs_feat = ObservationFeatures.from_arm(
        arm=self.trial.arm, trial_index=np.int64(self.trial.index)
    )
def gen_arms(
    observation_features: List[ObservationFeatures],
    arms_by_signature: Optional[Dict[str, Arm]] = None,
) -> Tuple[List[Arm], Optional[Dict[str, TCandidateMetadata]]]:
    """Convert observation features to a tuple of an arms list and a candidate
    metadata dict, where arm signatures are mapped to their respective
    candidate metadata.
    """
    # TODO(T34225939): handle static context (which is stored on observation_features)
    arms = []
    candidate_metadata = {}
    for of in observation_features:
        arm = Arm(parameters=of.parameters)
        if arms_by_signature is not None and arm.signature in arms_by_signature:
            existing_arm = arms_by_signature[arm.signature]
            arm = Arm(name=existing_arm.name, parameters=existing_arm.parameters)
        arms.append(arm)
        if of.metadata:
            candidate_metadata[arm.signature] = of.metadata
    return arms, candidate_metadata or None  # None if empty candidate metadata.
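A small usage sketch of the signature-based lookup above (values are assumptions, not from the source): when an incoming parameterization matches an arm the experiment already tracks, the existing name is carried over instead of creating an anonymous arm.

# Illustrative only: two observation features, one of which matches an
# already-named arm via Arm.signature.
from ax.core.arm import Arm
from ax.core.observation import ObservationFeatures

existing = Arm(name="0_0", parameters={"x1": 1.0, "x2": 2.0})
features = [
    ObservationFeatures(parameters={"x1": 1.0, "x2": 2.0}),  # matches existing arm
    ObservationFeatures(parameters={"x1": 3.0, "x2": 4.0}),  # new point
]
arms, metadata = gen_arms(
    observation_features=features,
    arms_by_signature={existing.signature: existing},
)
assert arms[0].name == "0_0"  # name reused via signature match
assert metadata is None       # no candidate metadata on these features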
def test_fail_on_batch(self):
    ax_client = AxClient()
    ax_client.create_experiment(
        parameters=[
            {"name": "x1", "type": "range", "bounds": [-5.0, 10.0]},
            {"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
        ],
        minimize=True,
    )
    batch_trial = ax_client.experiment.new_batch_trial(
        generator_run=GeneratorRun(
            arms=[
                Arm(parameters={"x1": 0, "x2": 1}),
                Arm(parameters={"x1": 0, "x2": 1}),
            ]
        )
    )
    with self.assertRaises(NotImplementedError):
        ax_client.complete_trial(batch_trial.index, 0)
def _init_mt_experiment_from_sqa(
    self, experiment_sqa: SQAExperiment
) -> MultiTypeExperiment:
    """First step of conversion within experiment_from_sqa."""
    opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
        metrics_sqa=experiment_sqa.metrics
    )
    search_space = self.search_space_from_sqa(
        parameters_sqa=experiment_sqa.parameters,
        parameter_constraints_sqa=experiment_sqa.parameter_constraints,
    )
    if search_space is None:
        raise SQADecodeError(  # pragma: no cover
            "Experiment SearchSpace cannot be None."
        )
    status_quo = (
        Arm(
            # pyre-fixme[6]: Expected `Dict[str, Optional[Union[bool, float,
            #  int, str]]]` for 1st param but got `Optional[Dict[str,
            #  Optional[Union[bool, float, int, str]]]]`.
            parameters=experiment_sqa.status_quo_parameters,
            name=experiment_sqa.status_quo_name,
        )
        if experiment_sqa.status_quo_parameters is not None
        else None
    )
    trial_type_to_runner = {
        not_none(sqa_runner.trial_type): self.runner_from_sqa(sqa_runner)
        for sqa_runner in experiment_sqa.runners
    }
    default_trial_type = not_none(experiment_sqa.default_trial_type)
    properties = experiment_sqa.properties
    if properties:
        # Remove 'subclass' from the experiment's properties, since it is only
        # used to decode to the correct experiment subclass in storage.
        properties.pop(Keys.SUBCLASS, None)
    default_data_type = experiment_sqa.default_data_type
    experiment = MultiTypeExperiment(
        name=experiment_sqa.name,
        description=experiment_sqa.description,
        search_space=search_space,
        default_trial_type=default_trial_type,
        default_runner=trial_type_to_runner[default_trial_type],
        optimization_config=opt_config,
        status_quo=status_quo,
        properties=properties,
        default_data_type=default_data_type,
    )
    experiment._trial_type_to_runner = trial_type_to_runner
    sqa_metric_dict = {metric.name: metric for metric in experiment_sqa.metrics}
    for tracking_metric in tracking_metrics:
        sqa_metric = sqa_metric_dict[tracking_metric.name]
        experiment.add_tracking_metric(
            tracking_metric,
            trial_type=not_none(sqa_metric.trial_type),
            canonical_name=sqa_metric.canonical_name,
        )
    return experiment
def get_simple_experiment() -> SimpleExperiment:
    experiment = SimpleExperiment(
        name="test_branin",
        search_space=get_branin_search_space(),
        status_quo=Arm(parameters={"x1": 0.0, "x2": 0.0}),
        objective_name="sum",
    )
    experiment.description = "foobar"
    return experiment
def setUp(self):
    self.search_space = SearchSpace(
        parameters=[
            RangeParameter(
                "a", lower=1, upper=3, parameter_type=ParameterType.FLOAT
            ),
            ChoiceParameter(
                "b", parameter_type=ParameterType.STRING, values=["a", "b", "c"]
            ),
        ]
    )
    self.observation_features = [
        ObservationFeatures(parameters={"a": 2, "b": "a"}),
        ObservationFeatures(parameters={"a": 3, "b": "b"}),
        ObservationFeatures(parameters={"a": 3, "b": "c"}),
    ]
    self.signature_to_parameterization = {
        Arm(parameters=obsf.parameters).signature: obsf.parameters
        for obsf in self.observation_features
    }
    self.transformed_features = [
        ObservationFeatures(
            parameters={"arms": Arm(parameters={"a": 2, "b": "a"}).signature}
        ),
        ObservationFeatures(
            parameters={"arms": Arm(parameters={"a": 3, "b": "b"}).signature}
        ),
        ObservationFeatures(
            parameters={"arms": Arm(parameters={"a": 3, "b": "c"}).signature}
        ),
    ]
    self.t = SearchSpaceToChoice(
        search_space=self.search_space,
        observation_features=self.observation_features,
        observation_data=None,
    )
    self.t2 = SearchSpaceToChoice(
        search_space=self.search_space,
        observation_features=[self.observation_features[0]],
        observation_data=None,
    )
def test_best_raw_objective_point_scalarized(self):
    exp = get_branin_experiment()
    exp.optimization_config = OptimizationConfig(
        ScalarizedObjective(metrics=[get_branin_metric()], minimize=False)
    )
    with self.assertRaisesRegex(ValueError, "Cannot identify best "):
        get_best_raw_objective_point(exp)
    self.assertEqual(get_best_parameters(exp, Models), None)
    exp.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    self.assertEqual(get_best_raw_objective_point(exp)[0], {"x1": 5.0, "x2": 5.0})
def testBasic(self) -> None:
    self.assertTrue(self.experiment.is_simple_experiment)
    trial = self.experiment.new_trial()
    with self.assertRaises(NotImplementedError):
        trial.runner = SyntheticRunner()
    with self.assertRaises(NotImplementedError):
        self.experiment.add_tracking_metric(Metric(name="test"))
    with self.assertRaises(NotImplementedError):
        self.experiment.update_tracking_metric(Metric(name="test"))
    self.assertTrue(self.experiment.eval_trial(trial).df.empty)
    batch = self.experiment.new_batch_trial()
    batch.add_arm(Arm(parameters={"x1": 5, "x2": 10}))
    self.assertEqual(self.experiment.eval_trial(batch).df["mean"][0], 15)
    self.experiment.new_batch_trial().add_arm(Arm(parameters={"x1": 15, "x2": 25}))
    self.assertAlmostEqual(self.experiment.eval().df["mean"][1], 40)
    self.assertEqual(batch.fetch_data().df["mean"][0], 15)
    self.assertAlmostEqual(self.experiment.fetch_data().df["mean"][1], 40)
def testObservationsWithCandidateMetadata(self):
    SOME_METADATA_KEY = "metadatum"
    truth = [
        {
            "arm_name": "0_0",
            "parameters": {"x": 0, "y": "a"},
            "mean": 2.0,
            "sem": 2.0,
            "trial_index": 0,
            "metric_name": "a",
        },
        {
            "arm_name": "1_0",
            "parameters": {"x": 1, "y": "b"},
            "mean": 3.0,
            "sem": 3.0,
            "trial_index": 1,
            "metric_name": "a",
        },
    ]
    arms = {
        obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
        for obs in truth
    }
    experiment = Mock()
    experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
    trials = {
        obs["trial_index"]: Trial(
            experiment,
            GeneratorRun(
                arms=[arms[obs["arm_name"]]],
                candidate_metadata_by_arm_signature={
                    arms[obs["arm_name"]].signature: {
                        SOME_METADATA_KEY: f"value_{obs['trial_index']}"
                    }
                },
            ),
        )
        for obs in truth
    }
    type(experiment).arms_by_name = PropertyMock(return_value=arms)
    type(experiment).trials = PropertyMock(return_value=trials)
    df = pd.DataFrame(truth)[
        ["arm_name", "trial_index", "mean", "sem", "metric_name"]
    ]
    data = Data(df=df)
    observations = observations_from_data(experiment, data)
    for observation in observations:
        self.assertEqual(
            observation.features.metadata.get(SOME_METADATA_KEY),
            f"value_{observation.features.trial_index}",
        )
def __init__(
    self,
    search_space: SearchSpace,
    observation_features: List[ObservationFeatures],
    observation_data: List[ObservationData],
    config: Optional[TConfig] = None,
) -> None:
    self.parameter_name = "arms"
    self.signature_to_parameterization = {
        Arm(parameters=obsf.parameters).signature: obsf.parameters
        for obsf in observation_features
    }
def get_arm_weights() -> MutableMapping[Arm, float]:
    # pyre: parameters_dicts is declared to have type `List[Dict[str, typing.
    # pyre: Optional[typing.Union[bool, float, str]]]]` but is used as type
    # pyre-fixme[9]: `List[Dict[str, typing.Union[float, str]]]`.
    parameters_dicts: List[TParameterization] = [
        {"w": 0.85, "x": 1, "y": "baz", "z": False},
        {"w": 0.75, "x": 1, "y": "foo", "z": True},
        {"w": 1.4, "x": 2, "y": "bar", "z": True},
    ]
    arms = [Arm(param_dict) for param_dict in parameters_dicts]
    weights = [0.25, 0.5, 0.25]
    return OrderedDict(zip(arms, weights))
def test_best_raw_objective_point(self):
    exp = get_branin_experiment()
    with self.assertRaisesRegex(ValueError, "Cannot identify best "):
        get_best_raw_objective_point(exp)
    self.assertEqual(get_best_parameters(exp), None)
    exp.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    opt_conf = exp.optimization_config.clone()
    opt_conf.objective.metric._name = "not_branin"
    with self.assertRaisesRegex(ValueError, "No data has been logged"):
        get_best_raw_objective_point(exp, opt_conf)
def testAddGeneratorRunValidation(self):
    new_batch_trial = self.experiment.new_batch_trial()
    new_arms = [
        Arm(name="0_1", parameters={"w": 0.75, "x": 1, "y": "foo", "z": True}),
        Arm(name="0_2", parameters={"w": 0.75, "x": 1, "y": "foo", "z": True}),
    ]
    gr = GeneratorRun(arms=new_arms)
    with self.assertRaises(ValueError):
        new_batch_trial.add_generator_run(gr)
def testClone(self):
    # Test simple cloning.
    arm = Arm({"x": 0, "y": "a"})
    obsf = ObservationFeatures.from_arm(arm, trial_index=3)
    self.assertIsNot(obsf, obsf.clone())
    self.assertEqual(obsf, obsf.clone())

    # Test cloning with replaced parameters.
    clone_with_new_params = obsf.clone(replace_parameters={"x": 1, "y": "b"})
    self.assertNotEqual(obsf, clone_with_new_params)
    obsf.parameters = {"x": 1, "y": "b"}
    self.assertEqual(obsf, clone_with_new_params)
def test_sobol_GPEI_strategy_batches(self):
    mock_GPEI_gen = self.mock_torch_model_bridge.return_value.gen
    mock_GPEI_gen.return_value = GeneratorRun(
        arms=[
            Arm(parameters={"x1": 1, "x2": 2}),
            Arm(parameters={"x1": 3, "x2": 4}),
        ]
    )
    exp = get_branin_experiment()
    sobol_GPEI_generation_strategy = GenerationStrategy(
        name="Sobol+GPEI",
        steps=[
            GenerationStep(
                model=Models.SOBOL,
                num_trials=1,
                model_kwargs=self.step_model_kwargs,
            ),
            GenerationStep(
                model=Models.GPEI,
                num_trials=6,
                model_kwargs=self.step_model_kwargs,
            ),
        ],
    )
    self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
    self.assertEqual(sobol_GPEI_generation_strategy.model_transitions, [1])
    gr = sobol_GPEI_generation_strategy.gen(exp, n=2)
    exp.new_batch_trial(generator_run=gr).run()
    for i in range(1, 8):
        if i == 7:
            # Check completeness error message.
            with self.assertRaises(GenerationStrategyCompleted):
                g = sobol_GPEI_generation_strategy.gen(exp, n=2)
        else:
            g = sobol_GPEI_generation_strategy.gen(exp, n=2)
            exp.new_batch_trial(generator_run=g).run()
    self.assertIsInstance(sobol_GPEI_generation_strategy.model, TorchModelBridge)
def test_sobol_GPEI_strategy_batches(self):
    mock_GPEI_gen = self.mock_torch_model_bridge.return_value.gen
    mock_GPEI_gen.return_value = GeneratorRun(
        arms=[
            Arm(parameters={"x1": 1, "x2": 2}),
            Arm(parameters={"x1": 3, "x2": 4}),
        ]
    )
    exp = get_branin_experiment()
    sobol_GPEI_generation_strategy = GenerationStrategy(
        name="Sobol+GPEI",
        steps=[
            GenerationStep(model=Models.SOBOL, num_arms=5),
            GenerationStep(model=Models.GPEI, num_arms=8),
        ],
    )
    self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
    self.assertEqual(sobol_GPEI_generation_strategy.model_transitions, [5])
    exp.new_batch_trial(
        generator_run=sobol_GPEI_generation_strategy.gen(exp, n=2)
    ).run()
    for i in range(1, 8):
        if i == 7:
            # Check completeness error message.
            with self.assertRaisesRegex(ValueError, "Generation strategy"):
                g = sobol_GPEI_generation_strategy.gen(exp, exp.fetch_data(), n=2)
        else:
            g = sobol_GPEI_generation_strategy.gen(
                exp, exp._fetch_trial_data(trial_index=i - 1), n=2
            )
            exp.new_batch_trial(generator_run=g).run()
    with self.assertRaises(ValueError):
        sobol_GPEI_generation_strategy.gen(exp, exp.fetch_data())
    self.assertIsInstance(sobol_GPEI_generation_strategy.model, TorchModelBridge)
def get_status_quo() -> Arm:
    return Arm(
        # Expected `Dict[str, typing.Optional[typing.Union[bool, float, str]]]`
        # for 2nd parameter `parameters` to call `ax.core.arm.Arm.__init__`
        # but got `Dict[str, typing.Union[float, str]]`.
        parameters={"w": 0.2, "x": 1, "y": "bar", "z": False},
        name="status_quo",
    )
def testAddArm(self):
    self.assertEqual(len(self.batch.arms), len(self.arms))
    self.assertEqual(len(self.batch.generator_run_structs), 1)
    self.assertEqual(sum(self.batch.weights), sum(self.weights))

    arm_parameters = get_arm().parameters
    arm_parameters["w"] = 5.0
    self.batch.add_arm(Arm(arm_parameters), 3)

    self.assertEqual(self.batch.arms_by_name["0_2"], self.batch.arms[2])
    self.assertEqual(len(self.batch.arms), len(self.arms) + 1)
    self.assertEqual(len(self.batch.generator_run_structs), 2)
    self.assertEqual(sum(self.batch.weights), sum(self.weights) + 3)
def attach_trial(
    self, parameters: TParameterization
) -> Tuple[TParameterization, int]:
    """Attach a new trial with the given parameterization to the experiment.

    Args:
        parameters: Parameterization of the new trial.

    Returns:
        Tuple of parameterization and trial index from the newly created trial.
    """
    trial = self.experiment.new_trial().add_arm(Arm(parameters=parameters))
    self._save_experiment_if_possible()
    return not_none(trial.arm).parameters, trial.index
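A hedged usage sketch of attaching a manually chosen parameterization through a service-style client (assuming this method is exposed on AxClient, as in the AxClient test earlier in this section); the experiment name and parameter values are made up for illustration.

# Illustrative only: attach an externally chosen arm, then report its outcome.
from ax.service.ax_client import AxClient

client = AxClient()
client.create_experiment(
    name="attach_example",  # hypothetical experiment name
    parameters=[
        {"name": "x1", "type": "range", "bounds": [-5.0, 10.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
    ],
)
params, trial_index = client.attach_trial(parameters={"x1": 0.0, "x2": 7.5})
client.complete_trial(trial_index=trial_index, raw_data=1.23)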
def testEq(self):
    self.assertEqual(self.unweighted_run, self.unweighted_run)
    arms = [
        Arm(parameters={"w": 0.5, "x": 15, "y": "foo", "z": False}),
        Arm(parameters={"w": 1.4, "x": 2, "y": "bar", "z": True}),
    ]
    unweighted_run_2 = GeneratorRun(
        arms=arms,
        optimization_config=self.optimization_config,
        search_space=self.search_space,
    )
    self.assertNotEqual(self.unweighted_run, unweighted_run_2)
def testStatusQuo(self):
    tot_weight = sum(self.batch.weights)
    new_sq = Arm(parameters={"w": 0.95, "x": 1, "y": "foo", "z": True})

    # Test negative weight.
    with self.assertRaises(ValueError):
        self.batch.set_status_quo_with_weight(new_sq, -1)

    # Test that directly setting the status quo raises an error.
    with self.assertRaises(NotImplementedError):
        self.batch.status_quo = new_sq

    # Set status quo to new arm.
    self.batch.set_status_quo_with_weight(new_sq, self.sq_weight)
    self.assertTrue(self.batch.status_quo == new_sq)
    self.assertEqual(self.batch.status_quo.name, "status_quo_0")
    self.assertEqual(sum(self.batch.weights), tot_weight + self.sq_weight)

    # The sq weight should be ignored when sq is None.
    self.batch.unset_status_quo()
    self.assertEqual(sum(self.batch.weights), tot_weight)

    # Verify that the experiment status quo gets set on init.
    self.experiment.status_quo = self.status_quo
    batch2 = self.batch.clone()
    self.assertEqual(batch2.status_quo, self.experiment.status_quo)

    # Since optimize_for_power was not set, the weight override should not be
    # set, and the status quo should not appear in arm_weights.
    self.assertIsNone(batch2._status_quo_weight_override)
    self.assertTrue(batch2.status_quo not in batch2.arm_weights)
    self.assertEqual(sum(batch2.weights), sum(self.weights))

    # Try setting sq to an existing arm with a different name.
    with self.assertRaises(ValueError):
        self.batch.set_status_quo_with_weight(
            Arm(new_sq.parameters, name="new_name"), 1
        )
def testNormalizedArmWeights(self):
    new_batch_trial = self.experiment.new_batch_trial()
    parameterizations = [
        {"w": 0.75, "x": 1, "y": "foo", "z": True},
        {"w": 0.77, "x": 2, "y": "foo", "z": True},
    ]
    arms = [Arm(parameters=p) for p in parameterizations]
    new_batch_trial.add_arms_and_weights(arms=arms, weights=[2, 1])

    # Test normalizing to 1.
    arm_weights = new_batch_trial.normalized_arm_weights()
    # self.assertEqual(list(arm_weights.keys()), arms)
    batch_arm_parameters = [arm.parameters for arm in list(arm_weights.keys())]
    arm_parameters = [arm.parameters for arm in arms]
    self.assertEqual(batch_arm_parameters, arm_parameters)
    self.assertTrue(np.allclose(list(arm_weights.values()), [2 / 3, 1 / 3]))

    # Test normalizing to 100.
    arm_weights = new_batch_trial.normalized_arm_weights(total=100)
    batch_arm_parameters = [arm.parameters for arm in list(arm_weights.keys())]
    arm_parameters = [arm.parameters for arm in arms]
    self.assertEqual(batch_arm_parameters, arm_parameters)
    self.assertTrue(np.allclose(list(arm_weights.values()), [200 / 3, 100 / 3]))

    # Test normalizing with truncation.
    arm_weights = new_batch_trial.normalized_arm_weights(total=1, trunc_digits=2)
    batch_arm_parameters = [arm.parameters for arm in list(arm_weights.keys())]
    arm_parameters = [arm.parameters for arm in arms]
    self.assertEqual(batch_arm_parameters, arm_parameters)
    self.assertTrue(np.allclose(list(arm_weights.values()), [0.67, 0.33]))
def _cast_arm(self, arm: Arm) -> Arm:
    """Cast the parameterization of the given arm to the types in this search
    space and to its hierarchical structure; return the newly cast arm.

    For each parameter in the given arm, cast it to the proper type specified
    in this search space, and remove it from the arm if that parameter should
    not be in the arm within the search space due to its hierarchical structure.
    """
    # Validate parameter values in the flat search space.
    arm = super().cast_arm(arm=arm)
    return Arm(
        parameters=self._cast_parameterization(parameters=arm.parameters),
        name=arm._name,
    )
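A minimal sketch of the base-class behavior that the super().cast_arm(...) call above relies on: values are cast to the declared parameter types. The parameter names and values here are assumptions for illustration.

# Illustrative only: an int supplied for a FLOAT parameter is cast to float
# by the (non-hierarchical) search space.
from ax.core.arm import Arm
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace

space = SearchSpace(
    parameters=[
        RangeParameter(
            "x1", lower=-5.0, upper=10.0, parameter_type=ParameterType.FLOAT
        )
    ]
)
cast = space.cast_arm(Arm(parameters={"x1": 5}))  # int value supplied
assert isinstance(cast.parameters["x1"], float)   # cast to float by the search space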
def test_best_raw_objective_point_unsatisfiable_relative(self):
    exp = get_branin_experiment()

    # Optimization config with an unsatisfiable relative constraint.
    opt_conf = exp.optimization_config.clone()
    opt_conf.outcome_constraints.append(
        OutcomeConstraint(
            metric=get_branin_metric(),
            op=ComparisonOp.GEQ,
            bound=9999,
            relative=True,
        )
    )

    trial = exp.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    trial.mark_completed()

    with self.assertLogs(logger="ax.service.utils.best_point", level="WARN") as lg:
        get_best_raw_objective_point(exp, opt_conf)
        self.assertTrue(
            any("No status quo provided" in warning for warning in lg.output),
            msg=lg.output,
        )

    exp.status_quo = Arm(parameters={"x1": 0, "x2": 0}, name="status_quo")
    sq_trial = exp.new_trial(generator_run=GeneratorRun(arms=[exp.status_quo])).run()
    sq_trial.mark_completed()
    with self.assertRaisesRegex(ValueError, "No points satisfied"):
        get_best_raw_objective_point(exp, opt_conf)
def out_of_design_arm(self) -> Arm:
    """Create a default out-of-design arm.

    An out-of-design arm contains values for some parameters which are outside
    of the search space. In the modeling conversion, these parameters are all
    stripped down to an empty dictionary, since the point is already outside
    of the modeled space.

    Returns:
        New arm with null parameter values.
    """
    parameters = {}
    for p_name in self.parameters.keys():
        parameters[p_name] = None
    return Arm(parameters)
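A quick usage sketch of the method above (parameter names are assumptions): the returned arm carries a None value for every parameter defined on the search space.

# Illustrative only: every parameter of the out-of-design arm is None.
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace

space = SearchSpace(
    parameters=[
        RangeParameter(
            "x1", lower=-5.0, upper=10.0, parameter_type=ParameterType.FLOAT
        ),
        RangeParameter(
            "x2", lower=0.0, upper=15.0, parameter_type=ParameterType.FLOAT
        ),
    ]
)
ood_arm = space.out_of_design_arm()
assert ood_arm.parameters == {"x1": None, "x2": None}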