Example 1
def get_branin_arms(n: int, seed: int) -> List[Arm]:
    # TODO: replace uniform sampling with Sobol (see the sketch below).
    np.random.seed(seed)
    x1_raw = np.random.rand(n)
    x2_raw = np.random.rand(n)
    return [
        Arm(parameters={
            "x1": -5 + x1_raw[i] * 15,
            "x2": x2_raw[i] * 15
        }) for i in range(n)
    ]
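The TODO above asks for Sobol sampling in place of uniform random draws. A minimal sketch of that replacement, assuming SciPy >= 1.7 is available (its scipy.stats.qmc module provides a Sobol sampler); the function name get_branin_arms_sobol is hypothetical:

from typing import List

from scipy.stats import qmc

from ax import Arm


def get_branin_arms_sobol(n: int, seed: int) -> List[Arm]:
    # Draw n quasi-random points in [0, 1]^2, then scale them to the
    # Branin domain: x1 in [-5, 10], x2 in [0, 15].
    sampler = qmc.Sobol(d=2, seed=seed)
    raw = sampler.random(n)
    scaled = qmc.scale(raw, l_bounds=[-5.0, 0.0], u_bounds=[10.0, 15.0])
    return [
        Arm(parameters={"x1": float(x1), "x2": float(x2)}) for x1, x2 in scaled
    ]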
Example 2
def gen_arms(
    observation_features: List[ObservationFeatures],
    arms_by_signature: Optional[Dict[str, Arm]] = None,
) -> Tuple[List[Arm], Optional[Dict[str, TCandidateMetadata]]]:
    """Converts observation features to a tuple of arms list and candidate metadata
    dict, where arm signatures are mapped to their respective candidate metadata.
    """
    # TODO(T34225939): handle static context (which is stored on observation_features)
    arms = []
    candidate_metadata = {}
    for of in observation_features:
        arm = Arm(parameters=of.parameters)
        if arms_by_signature is not None and arm.signature in arms_by_signature:
            existing_arm = arms_by_signature[arm.signature]
            arm = Arm(name=existing_arm.name,
                      parameters=existing_arm.parameters)
        arms.append(arm)
        if of.metadata:
            candidate_metadata[arm.signature] = of.metadata
    return arms, candidate_metadata or None  # None if empty cand. metadata.
Example 3
def get_simple_experiment() -> SimpleExperiment:
    experiment = SimpleExperiment(
        name="test_branin",
        search_space=get_branin_search_space(),
        status_quo=Arm(parameters={"x1": 0.0, "x2": 0.0}),
        objective_name="sum",
    )

    experiment.description = "foobar"

    return experiment
Example 4
 def setUp(self):
     self.search_space = SearchSpace(
         parameters=[
             RangeParameter(
                 "a", lower=1, upper=3, parameter_type=ParameterType.FLOAT
             ),
             ChoiceParameter(
                 "b", parameter_type=ParameterType.STRING, values=["a", "b", "c"]
             ),
         ]
     )
     self.observation_features = [
         ObservationFeatures(parameters={"a": 2, "b": "a"}),
         ObservationFeatures(parameters={"a": 3, "b": "b"}),
         ObservationFeatures(parameters={"a": 3, "b": "c"}),
     ]
     self.signature_to_parameterization = {
         Arm(parameters=obsf.parameters).signature: obsf.parameters
         for obsf in self.observation_features
     }
     self.transformed_features = [
         ObservationFeatures(
             parameters={"arms": Arm(parameters={"a": 2, "b": "a"}).signature}
         ),
         ObservationFeatures(
             parameters={"arms": Arm(parameters={"a": 3, "b": "b"}).signature}
         ),
         ObservationFeatures(
             parameters={"arms": Arm(parameters={"a": 3, "b": "c"}).signature}
         ),
     ]
     self.t = SearchSpaceToChoice(
         search_space=self.search_space,
         observation_features=self.observation_features,
         observation_data=None,
     )
     self.t2 = SearchSpaceToChoice(
         search_space=self.search_space,
         observation_features=[self.observation_features[0]],
         observation_data=None,
     )
Example 5
 def setUp(self) -> None:
     self.experiment = get_experiment()
     self.arm = Arm({"x": 1, "y": "foo", "z": True, "w": 4})
     self.trial = self.experiment.new_trial(GeneratorRun([self.arm]))
     self.experiment_2 = get_experiment()
     self.batch_trial = self.experiment_2.new_batch_trial(
         GeneratorRun([self.arm]))
     self.batch_trial.set_status_quo_with_weight(
         self.experiment_2.status_quo, 1)
     self.obs_feat = ObservationFeatures.from_arm(
         arm=self.trial.arm, trial_index=np.int64(self.trial.index)
     )
Example 6
 def testAddGeneratorRunValidation(self):
     new_batch_trial = self.experiment.new_batch_trial()
     new_arms = [
         Arm(name="0_1", parameters={"w": 0.75, "x": 1, "y": "foo", "z": True}),
         Arm(name="0_2", parameters={"w": 0.75, "x": 1, "y": "foo", "z": True}),
     ]
     gr = GeneratorRun(arms=new_arms)
     with self.assertRaises(ValueError):
         new_batch_trial.add_generator_run(gr)
Example 7
 def testBasic(self) -> None:
     self.assertTrue(self.experiment.is_simple_experiment)
     trial = self.experiment.new_trial()
     with self.assertRaises(NotImplementedError):
         trial.runner = SyntheticRunner()
     with self.assertRaises(NotImplementedError):
         self.experiment.add_tracking_metric(Metric(name="test"))
     with self.assertRaises(NotImplementedError):
         self.experiment.update_tracking_metric(Metric(name="test"))
     self.assertTrue(self.experiment.eval_trial(trial).df.empty)
     batch = self.experiment.new_batch_trial()
     batch.add_arm(Arm(parameters={"x1": 5, "x2": 10}))
     self.assertEqual(self.experiment.eval_trial(batch).df["mean"][0], 15)
     self.experiment.new_batch_trial().add_arm(
         Arm(parameters={"x1": 15, "x2": 25})
     )
     self.assertAlmostEqual(self.experiment.eval().df["mean"][1], 40)
     self.assertEqual(batch.fetch_data().df["mean"][0], 15)
     self.assertAlmostEqual(self.experiment.fetch_data().df["mean"][1], 40)
Example 8
    def testObservationsWithCandidateMetadata(self):
        SOME_METADATA_KEY = "metadatum"
        truth = [
            {
                "arm_name": "0_0",
                "parameters": {"x": 0, "y": "a"},
                "mean": 2.0,
                "sem": 2.0,
                "trial_index": 0,
                "metric_name": "a",
            },
            {
                "arm_name": "1_0",
                "parameters": {"x": 1, "y": "b"},
                "mean": 3.0,
                "sem": 3.0,
                "trial_index": 1,
                "metric_name": "a",
            },
        ]
        arms = {
            obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
            for obs in truth
        }
        experiment = Mock()
        experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
        trials = {
            obs["trial_index"]: Trial(
                experiment,
                GeneratorRun(
                    arms=[arms[obs["arm_name"]]],
                    candidate_metadata_by_arm_signature={
                        arms[obs["arm_name"]].signature: {
                            SOME_METADATA_KEY: f"value_{obs['trial_index']}"
                        }
                    },
                ),
            )
            for obs in truth
        }
        type(experiment).arms_by_name = PropertyMock(return_value=arms)
        type(experiment).trials = PropertyMock(return_value=trials)

        df = pd.DataFrame(truth)[
            ["arm_name", "trial_index", "mean", "sem", "metric_name"]
        ]
        data = Data(df=df)
        observations = observations_from_data(experiment, data)
        for observation in observations:
            self.assertEqual(
                observation.features.metadata.get(SOME_METADATA_KEY),
                f"value_{observation.features.trial_index}",
            )
Example 9
def get_arm_weights() -> MutableMapping[Arm, float]:
    # pyre: parameters_dicts is declared to have type `List[Dict[str, typing.
    # pyre: Optional[typing.Union[bool, float, str]]]]` but is used as type
    # pyre-fixme[9]: `List[Dict[str, typing.Union[float, str]]]`.
    parameters_dicts: List[TParameterization] = [
        {"w": 0.85, "x": 1, "y": "baz", "z": False},
        {"w": 0.75, "x": 1, "y": "foo", "z": True},
        {"w": 1.4, "x": 2, "y": "bar", "z": True},
    ]
    arms = [Arm(param_dict) for param_dict in parameters_dicts]
    weights = [0.25, 0.5, 0.25]
    return OrderedDict(zip(arms, weights))
Example 10
 def __init__(
     self,
     search_space: SearchSpace,
     observation_features: List[ObservationFeatures],
     observation_data: List[ObservationData],
     config: Optional[TConfig] = None,
 ) -> None:
     self.parameter_name = "arms"
     self.signature_to_parameterization = {
         Arm(parameters=obsf.parameters).signature: obsf.parameters
         for obsf in observation_features
     }
Example 11
 def test_best_raw_objective_point_scalarized(self):
     exp = get_branin_experiment()
     exp.optimization_config = OptimizationConfig(
         ScalarizedObjective(metrics=[get_branin_metric()], minimize=False)
     )
     with self.assertRaisesRegex(ValueError, "Cannot identify best "):
         get_best_raw_objective_point(exp)
     self.assertEqual(get_best_parameters(exp, Models), None)
     exp.new_trial(
         generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
     ).run()
     self.assertEqual(get_best_raw_objective_point(exp)[0], {"x1": 5.0, "x2": 5.0})
Example 12
    def testClone(self):
        # Test simple cloning.
        arm = Arm({"x": 0, "y": "a"})
        obsf = ObservationFeatures.from_arm(arm, trial_index=3)
        self.assertIsNot(obsf, obsf.clone())
        self.assertEqual(obsf, obsf.clone())

        # Test cloning with swapping parameters.
        clone_with_new_params = obsf.clone(replace_parameters={"x": 1, "y": "b"})
        self.assertNotEqual(obsf, clone_with_new_params)
        obsf.parameters = {"x": 1, "y": "b"}
        self.assertEqual(obsf, clone_with_new_params)
Example 13
 def _init_mt_experiment_from_sqa(
         self, experiment_sqa: SQAExperiment) -> MultiTypeExperiment:
     """First step of conversion within experiment_from_sqa."""
     opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
         metrics_sqa=experiment_sqa.metrics)
     search_space = self.search_space_from_sqa(
         parameters_sqa=experiment_sqa.parameters,
         parameter_constraints_sqa=experiment_sqa.parameter_constraints,
     )
     if search_space is None:
         raise SQADecodeError(  # pragma: no cover
             "Experiment SearchSpace cannot be None.")
     status_quo = (
         Arm(
             # pyre-fixme[6]: Expected `Dict[str, Optional[Union[bool, float,
             #  int, str]]]` for 1st param but got `Optional[Dict[str,
             #  Optional[Union[bool, float, int, str]]]]`.
             parameters=experiment_sqa.status_quo_parameters,
             name=experiment_sqa.status_quo_name,
         ) if experiment_sqa.status_quo_parameters is not None else None)
     trial_type_to_runner = {
         not_none(sqa_runner.trial_type): self.runner_from_sqa(sqa_runner)
         for sqa_runner in experiment_sqa.runners
     }
     default_trial_type = not_none(experiment_sqa.default_trial_type)
     properties = experiment_sqa.properties
     if properties:
          # Remove 'subclass' from experiment's properties, since it's only
          # used for decoding to the correct experiment subclass in storage.
         properties.pop(Keys.SUBCLASS, None)
     experiment = MultiTypeExperiment(
         name=experiment_sqa.name,
         description=experiment_sqa.description,
         search_space=search_space,
         default_trial_type=default_trial_type,
         default_runner=trial_type_to_runner[default_trial_type],
         optimization_config=opt_config,
         status_quo=status_quo,
         properties=properties,
     )
     experiment._trial_type_to_runner = trial_type_to_runner
     sqa_metric_dict = {
         metric.name: metric
         for metric in experiment_sqa.metrics
     }
     for tracking_metric in tracking_metrics:
         sqa_metric = sqa_metric_dict[tracking_metric.name]
         experiment.add_tracking_metric(
             tracking_metric,
             trial_type=not_none(sqa_metric.trial_type),
             canonical_name=sqa_metric.canonical_name,
         )
     return experiment
Example 14
 def test_best_raw_objective_point(self):
     exp = get_branin_experiment()
     with self.assertRaisesRegex(ValueError, "Cannot identify best "):
         get_best_raw_objective_point(exp)
     self.assertEqual(get_best_parameters(exp), None)
     exp.new_trial(
         generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
     ).run()
     opt_conf = exp.optimization_config.clone()
     opt_conf.objective.metric._name = "not_branin"
     with self.assertRaisesRegex(ValueError, "No data has been logged"):
         get_best_raw_objective_point(exp, opt_conf)
Example 15
 def test_sobol_GPEI_strategy_batches(self):
     mock_GPEI_gen = self.mock_torch_model_bridge.return_value.gen
     mock_GPEI_gen.return_value = GeneratorRun(
         arms=[
             Arm(parameters={"x1": 1, "x2": 2}),
             Arm(parameters={"x1": 3, "x2": 4}),
         ]
     )
     exp = get_branin_experiment()
     sobol_GPEI_generation_strategy = GenerationStrategy(
         name="Sobol+GPEI",
         steps=[
             GenerationStep(
                 model=Models.SOBOL,
                 num_trials=1,
                 model_kwargs=self.step_model_kwargs,
             ),
             GenerationStep(model=Models.GPEI,
                            num_trials=6,
                            model_kwargs=self.step_model_kwargs),
         ],
     )
     self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
     self.assertEqual(sobol_GPEI_generation_strategy.model_transitions, [1])
     gr = sobol_GPEI_generation_strategy.gen(exp, n=2)
     exp.new_batch_trial(generator_run=gr).run()
     for i in range(1, 8):
         if i == 7:
             # Check completeness error message.
             with self.assertRaises(GenerationStrategyCompleted):
                 g = sobol_GPEI_generation_strategy.gen(exp, n=2)
         else:
             g = sobol_GPEI_generation_strategy.gen(exp, n=2)
             exp.new_batch_trial(generator_run=g).run()
     self.assertIsInstance(sobol_GPEI_generation_strategy.model,
                           TorchModelBridge)
Example 16
def get_status_quo() -> Arm:
    return Arm(
        # Expected `Dict[str, typing.Optional[typing.Union[bool, float, str]]]` for 2nd
        # parameter `parameters` to call `ax.core.arm.Arm.__init__`
        # but got `Dict[str, typing.Union[float, str]]`.
        parameters={"w": 0.2, "x": 1, "y": "bar", "z": False},
        name="status_quo",
    )
Example 17
 def test_sobol_GPEI_strategy_batches(self):
     mock_GPEI_gen = self.mock_torch_model_bridge.return_value.gen
     mock_GPEI_gen.return_value = GeneratorRun(
         arms=[
             Arm(parameters={"x1": 1, "x2": 2}),
             Arm(parameters={"x1": 3, "x2": 4}),
         ]
     )
     exp = get_branin_experiment()
     sobol_GPEI_generation_strategy = GenerationStrategy(
         name="Sobol+GPEI",
         steps=[
             GenerationStep(model=Models.SOBOL, num_arms=5),
             GenerationStep(model=Models.GPEI, num_arms=8),
         ],
     )
     self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
     self.assertEqual(sobol_GPEI_generation_strategy.model_transitions, [5])
     exp.new_batch_trial(
         generator_run=sobol_GPEI_generation_strategy.gen(exp, n=2)).run()
     for i in range(1, 8):
         if i == 7:
             # Check completeness error message.
             with self.assertRaisesRegex(ValueError, "Generation strategy"):
                 g = sobol_GPEI_generation_strategy.gen(exp, exp.fetch_data(), n=2)
         else:
             g = sobol_GPEI_generation_strategy.gen(
                 exp, exp._fetch_trial_data(trial_index=i - 1), n=2
             )
             exp.new_batch_trial(generator_run=g).run()
     with self.assertRaises(ValueError):
         sobol_GPEI_generation_strategy.gen(exp, exp.fetch_data())
     self.assertIsInstance(sobol_GPEI_generation_strategy.model,
                           TorchModelBridge)
Example 18
    def testAddArm(self):
        self.assertEqual(len(self.batch.arms), len(self.arms))
        self.assertEqual(len(self.batch.generator_run_structs), 1)
        self.assertEqual(sum(self.batch.weights), sum(self.weights))

        arm_parameters = get_arm().parameters
        arm_parameters["w"] = 5.0
        self.batch.add_arm(Arm(arm_parameters), 3)

        self.assertEqual(self.batch.arms_by_name["0_2"], self.batch.arms[2])
        self.assertEqual(len(self.batch.arms), len(self.arms) + 1)
        self.assertEqual(len(self.batch.generator_run_structs), 2)
        self.assertEqual(sum(self.batch.weights), sum(self.weights) + 3)
Example 19
    def testEq(self):
        self.assertEqual(self.unweighted_run, self.unweighted_run)

        arms = [
            Arm(parameters={"w": 0.5, "x": 15, "y": "foo", "z": False}),
            Arm(parameters={"w": 1.4, "x": 2, "y": "bar", "z": True}),
        ]
        unweighted_run_2 = GeneratorRun(
            arms=arms,
            optimization_config=self.optimization_config,
            search_space=self.search_space,
        )
        self.assertNotEqual(self.unweighted_run, unweighted_run_2)
Example 20
    def attach_trial(
            self,
            parameters: TParameterization) -> Tuple[TParameterization, int]:
        """Attach a new trial with the given parameterization to the experiment.

        Args:
            parameters: Parameterization of the new trial.

        Returns:
            Tuple of parameterization and trial index from newly created trial.
        """
        trial = self.experiment.new_trial().add_arm(Arm(parameters=parameters))
        self._save_experiment_if_possible()
        return not_none(trial.arm).parameters, trial.index
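For context, a sketch of how this method might be called through an AxClient-style wrapper; the client instance and the "objective" metric name are assumptions for illustration, not part of the snippet above:

parameters, trial_index = client.attach_trial(parameters={"x1": 0.5, "x2": 0.5})
# Evaluate the attached parameterization externally, then report the result.
client.complete_trial(trial_index=trial_index, raw_data={"objective": 1.23})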
Example 21
    def testStatusQuo(self):
        tot_weight = sum(self.batch.weights)
        new_sq = Arm(parameters={"w": 0.95, "x": 1, "y": "foo", "z": True})

        # Test negative weight
        with self.assertRaises(ValueError):
            self.batch.set_status_quo_with_weight(new_sq, -1)

        # Test that directly setting the status quo raises an error
        with self.assertRaises(NotImplementedError):
            self.batch.status_quo = new_sq

        # Set status quo to new arm
        self.batch.set_status_quo_with_weight(new_sq, self.sq_weight)
        self.assertTrue(self.batch.status_quo == new_sq)
        self.assertEqual(self.batch.status_quo.name, "status_quo_0")
        self.assertEqual(sum(self.batch.weights), tot_weight + self.sq_weight)
        # sq weight should be ignored when sq is None
        self.batch.unset_status_quo()
        self.assertEqual(sum(self.batch.weights), tot_weight)

        # Verify experiment status quo gets set on init
        self.experiment.status_quo = self.status_quo
        batch2 = self.batch.clone()
        self.assertEqual(batch2.status_quo, self.experiment.status_quo)

        # Since optimize_for_power was not set, the weight override should not
        # be set, and the status quo should not appear in arm_weights.
        self.assertIsNone(batch2._status_quo_weight_override)
        self.assertTrue(batch2.status_quo not in batch2.arm_weights)
        self.assertEqual(sum(batch2.weights), sum(self.weights))

        # Try setting sq to existing arm with different name
        with self.assertRaises(ValueError):
            self.batch.set_status_quo_with_weight(
                Arm(new_sq.parameters, name="new_name"), 1
            )
Example 22
    def testNormalizedArmWeights(self):
        new_batch_trial = self.experiment.new_batch_trial()
        parameterizations = [
            {"w": 0.75, "x": 1, "y": "foo", "z": True},
            {"w": 0.77, "x": 2, "y": "foo", "z": True},
        ]
        arms = [Arm(parameters=p) for p in parameterizations]
        new_batch_trial.add_arms_and_weights(arms=arms, weights=[2, 1])

        # test normalizing to 1
        arm_weights = new_batch_trial.normalized_arm_weights()
        # self.assertEqual(list(arm_weights.keys()), arms)
        batch_arm_parameters = [
            arm.parameters for arm in list(arm_weights.keys())
        ]
        arm_parameters = [arm.parameters for arm in arms]
        self.assertEqual(batch_arm_parameters, arm_parameters)
        self.assertTrue(np.allclose(list(arm_weights.values()),
                                    [2 / 3, 1 / 3]))

        # test normalizing to 100
        arm_weights = new_batch_trial.normalized_arm_weights(total=100)
        batch_arm_parameters = [
            arm.parameters for arm in list(arm_weights.keys())
        ]
        arm_parameters = [arm.parameters for arm in arms]
        self.assertEqual(batch_arm_parameters, arm_parameters)
        self.assertTrue(
            np.allclose(list(arm_weights.values()), [200 / 3, 100 / 3]))

        # test normalizing with truncation
        arm_weights = new_batch_trial.normalized_arm_weights(total=1,
                                                             trunc_digits=2)
        batch_arm_parameters = [
            arm.parameters for arm in list(arm_weights.keys())
        ]
        arm_parameters = [arm.parameters for arm in arms]
        self.assertEqual(batch_arm_parameters, arm_parameters)
        self.assertTrue(np.allclose(list(arm_weights.values()), [0.67, 0.33]))
Example 23
 def setUp(self) -> None:
     self.experiment = get_experiment()
     self.arm = Arm({"x": 1, "y": "foo", "z": True, "w": 4})
     self.trial = self.experiment.new_trial(GeneratorRun([self.arm]))
     self.experiment_2 = get_experiment()
     self.batch_trial = self.experiment_2.new_batch_trial(
         GeneratorRun([self.arm]))
     self.batch_trial.set_status_quo_with_weight(
         self.experiment_2.status_quo, 1)
     self.obs_feat = ObservationFeatures.from_arm(
         arm=self.trial.arm, trial_index=np.int64(self.trial.index)
     )
     self.hss_exp = get_hierarchical_search_space_experiment()
     self.hss_sobol = Models.SOBOL(search_space=self.hss_exp.search_space)
     self.hss_gr = self.hss_sobol.gen(n=1)
     self.hss_trial = self.hss_exp.new_trial(self.hss_gr)
     self.hss_arm = not_none(self.hss_trial.arm)
     self.hss_cand_metadata = self.hss_trial._get_candidate_metadata(
         arm_name=self.hss_arm.name)
     self.hss_full_parameterization = self.hss_cand_metadata.get(
         Keys.FULL_PARAMETERIZATION).copy()
     self.assertTrue(
         all(p_name in self.hss_full_parameterization
             for p_name in self.hss_exp.search_space.parameters))
     self.hss_obs_feat = ObservationFeatures.from_arm(
         arm=self.hss_arm,
         trial_index=np.int64(self.hss_trial.index),
         metadata=self.hss_cand_metadata,
     )
     self.hss_obs_feat_all_params = ObservationFeatures.from_arm(
         arm=Arm(self.hss_full_parameterization),
         trial_index=np.int64(self.hss_trial.index),
         metadata={
             Keys.FULL_PARAMETERIZATION: self.hss_full_parameterization
         },
     )
Example 24
    def _cast_arm(self, arm: Arm) -> Arm:
        """Cast parameterization of given arm to the types in this search space and to
        its hierarchical structure; return the newly cast arm.

        For each parameter in the given arm, cast its value to the type
        specified in this search space, and remove the parameter from the arm
        if the search space's hierarchical structure makes it inactive there.
        """
        # Validate parameter values in flat search space.
        arm = super().cast_arm(arm=arm)

        return Arm(
            parameters=self._cast_parameterization(parameters=arm.parameters),
            name=arm._name,
        )
Example 25
    def test_best_raw_objective_point_unsatisfiable_relative(self):
        exp = get_branin_experiment()

        # Optimization config with unsatisfiable constraint
        opt_conf = exp.optimization_config.clone()
        opt_conf.outcome_constraints.append(
            OutcomeConstraint(
                metric=get_branin_metric(),
                op=ComparisonOp.GEQ,
                bound=9999,
                relative=True,
            ))

        trial = exp.new_trial(
            generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
        ).run()
        trial.mark_completed()

        with self.assertLogs(logger="ax.service.utils.best_point",
                             level="WARN") as lg:
            get_best_raw_objective_point(exp, opt_conf)
            self.assertTrue(
                any("No status quo provided" in warning
                    for warning in lg.output),
                msg=lg.output,
            )

        exp.status_quo = Arm(parameters={"x1": 0, "x2": 0}, name="status_quo")
        sq_trial = exp.new_trial(generator_run=GeneratorRun(
            arms=[exp.status_quo])).run()
        sq_trial.mark_completed()

        with self.assertRaisesRegex(ValueError, "No points satisfied"):
            get_best_raw_objective_point(exp, opt_conf)
Example 26
    def out_of_design_arm(self) -> Arm:
        """Create a default out-of-design arm.

        An out-of-design arm contains values for some parameters which are
        outside of the search space. In the modeling conversion, these parameters
        are all stripped down to an empty dictionary, since the point is already
        outside of the modeled space.

        Returns:
            New arm w/ null parameter values.
        """
        parameters = {p_name: None for p_name in self.parameters}
        return Arm(parameters)
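For illustration, a minimal sketch of the result, assuming search_space is an instance whose parameters are "x1" and "x2":

arm = search_space.out_of_design_arm()
# Every parameter value is None, marking the arm as out-of-design.
assert arm.parameters == {"x1": None, "x2": None}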
Example 27
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_trial: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    with_choice_parameter: bool = False,
    with_str_choice_param: bool = False,
    search_space: Optional[SearchSpace] = None,
    minimize: bool = False,
    named: bool = True,
    with_completed_trial: bool = False,
) -> Experiment:
    search_space = search_space or get_branin_search_space(
        with_fidelity_parameter=with_fidelity_parameter,
        with_choice_parameter=with_choice_parameter,
        with_str_choice_param=with_str_choice_param,
    )
    exp = Experiment(
        name="branin_test_experiment" if named else None,
        search_space=search_space,
        optimization_config=get_branin_optimization_config(
            minimize=minimize) if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial(
            optimize_for_power=with_status_quo).add_generator_run(sobol_run)

    if with_trial or with_completed_trial:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=1)
        trial = exp.new_trial(generator_run=sobol_run)

        if with_completed_trial:
            trial.mark_running(no_runner_required=True)
            exp.attach_data(
                get_branin_data(trials=[trial]))  # Add data for one trial
            trial.mark_completed()

    return exp
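A brief usage sketch exercising a few of the flags above (the flag values are chosen for illustration):

exp = get_branin_experiment(
    with_batch=True,       # Attach a 15-arm Sobol batch trial.
    with_status_quo=True,  # Set {"x1": 0.0, "x2": 0.0} as the status quo.
    minimize=True,         # Minimize the Branin objective.
)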
Example 28
    def test_best_raw_objective_point_unsatisfiable(self):
        exp = get_branin_experiment()
        trial = exp.new_trial(
            generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
        ).run()
        trial.mark_completed()

        opt_conf = exp.optimization_config.clone()
        opt_conf.outcome_constraints.append(
            OutcomeConstraint(
                metric=get_branin_metric(), op=ComparisonOp.LEQ, bound=0, relative=False
            )
        )

        with self.assertRaisesRegex(ValueError, "No points satisfied"):
            get_best_raw_objective_point(exp, opt_conf)
Example 29
 def test_run_preattached_trials_only(self):
     # Assert that pre-attached trials run even when max_trials=0.
     scheduler = BareBonesTestScheduler(
         experiment=self.branin_experiment,  # Has runner and metrics.
         generation_strategy=self.two_sobol_steps_GS,
         options=SchedulerOptions(
             init_seconds_between_polls=0.1,  # Short between polls so test is fast.
         ),
     )
     trial = scheduler.experiment.new_trial()
     trial.add_arm(Arm(parameters={"x1": 5, "x2": 5}))
     scheduler.run_n_trials(max_trials=0)
     self.assertEqual(len(scheduler.experiment.trials), 1)
     self.assertTrue(  # Make sure all trials got to complete.
         all(t.completed_successfully for t in scheduler.experiment.trials.values())
     )
Example 30
    def testGenArms(self):
        p1 = {"x": 0, "y": 1}
        p2 = {"x": 4, "y": 8}
        observation_features = [
            ObservationFeatures(parameters=p1),
            ObservationFeatures(parameters=p2),
        ]
        arms = gen_arms(observation_features=observation_features)
        self.assertEqual(arms[0].parameters, p1)

        arm = Arm(name="1_1", parameters=p1)
        arms_by_signature = {arm.signature: arm}
        arms = gen_arms(
            observation_features=observation_features,
            arms_by_signature=arms_by_signature,
        )
        self.assertEqual(arms[0].name, "1_1")