Example #1
def get_multi_type_experiment(
    add_trial_type: bool = True, add_trials: bool = False
) -> MultiTypeExperiment:
    oc = OptimizationConfig(Objective(BraninMetric("m1", ["x1", "x2"])))
    experiment = MultiTypeExperiment(
        name="test_exp",
        search_space=get_branin_search_space(),
        default_trial_type="type1",
        default_runner=SyntheticRunner(dummy_metadata="dummy1"),
        optimization_config=oc,
    )
    experiment.add_trial_type(
        trial_type="type2", runner=SyntheticRunner(dummy_metadata="dummy2")
    )
    # Switch the order of variables so metric gives different results
    experiment.add_tracking_metric(
        BraninMetric("m2", ["x2", "x1"]), trial_type="type2", canonical_name="m1"
    )

    if add_trials and add_trial_type:
        generator = get_sobol(experiment.search_space)
        gr = generator.gen(10)
        t1 = experiment.new_batch_trial(generator_run=gr, trial_type="type1")
        t2 = experiment.new_batch_trial(generator_run=gr, trial_type="type2")
        t1.set_status_quo_with_weight(status_quo=t1.arms[0], weight=0.5)
        t2.set_status_quo_with_weight(status_quo=t2.arms[0], weight=0.5)
        t1.run()
        t2.run()

    return experiment
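
A note added for this listing (not part of the source): with the default flags the helper returns an experiment without trials, so callers that need data pass add_trials=True. A minimal usage sketch, assuming the helper above is importable (e.g. from ax.utils.testing.core_stubs in the Ax test suite):

# Hypothetical usage of the stub defined above.
experiment = get_multi_type_experiment(add_trials=True)
# One batch per trial type was created and run() inside the helper.
assert experiment.trials[0].trial_type == "type1"
assert experiment.trials[1].trial_type == "type2"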
Example #2
    def testRunner(self):
        # Verify BatchTrial without runner will fail
        with self.assertRaises(ValueError):
            self.batch.run()

        # Verify mark running without runner will fail
        with self.assertRaises(ValueError):
            self.batch.mark_running()

        self.batch.runner = SyntheticRunner()
        self.batch.run()
        self.assertEqual(self.batch.deployed_name, "test_0")
        self.assertNotEqual(len(self.batch.run_metadata.keys()), 0)
        self.assertEqual(self.batch.status, TrialStatus.RUNNING)

        # Verify setting runner on experiment but not BatchTrial
        # Also mock staging_required to be true
        staging_mock = PropertyMock()
        with patch.object(SyntheticRunner, "staging_required", staging_mock):
            mock_runner = SyntheticRunner()
            staging_mock.return_value = True

            self.experiment.runner = mock_runner
            b2 = self.experiment.new_batch_trial()
            b2.run()
            self.assertEqual(b2.deployed_name, "test_1")
            self.assertEqual(b2.status, TrialStatus.STAGED)
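
The patch.object/PropertyMock pairing above is the standard library's way to stub a read-only property on a class. The same pattern in isolation, independent of Ax:

# Stubbing a class property with PropertyMock (standard library only).
from unittest.mock import PropertyMock, patch

class Runner:
    @property
    def staging_required(self) -> bool:
        return False

staging_mock = PropertyMock(return_value=True)
with patch.object(Runner, "staging_required", staging_mock):
    assert Runner().staging_required is True  # property is now mocked
assert Runner().staging_required is False  # original restored on exit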
Example #3
    def testExperimentRunner(self):
        original_runner = SyntheticRunner()
        self.experiment.runner = original_runner
        batch = self.experiment.new_batch_trial()
        batch.run()
        self.assertEqual(batch.runner, original_runner)

        # Simulate a failed run/deployment, in which the runner is attached
        # but the actual run fails, and so the trial remains CANDIDATE.
        candidate_batch = self.experiment.new_batch_trial()
        candidate_batch.run()
        candidate_batch._status = TrialStatus.CANDIDATE
        self.assertEqual(self.experiment.trials_expecting_data, [batch])
        tbs = self.experiment.trials_by_status  # All statuses should be present
        self.assertEqual(len(tbs), len(TrialStatus))
        self.assertEqual(tbs[TrialStatus.RUNNING], [batch])
        self.assertEqual(tbs[TrialStatus.CANDIDATE], [candidate_batch])
        tibs = self.experiment.trial_indices_by_status
        self.assertEqual(len(tibs), len(TrialStatus))
        self.assertEqual(tibs[TrialStatus.RUNNING], {0})
        self.assertEqual(tibs[TrialStatus.CANDIDATE], {1})

        identifier = {"new_runner": True}
        new_runner = SyntheticRunner(dummy_metadata=identifier)

        self.experiment.reset_runners(new_runner)
        # Don't update trials that have been run.
        self.assertEqual(batch.runner, original_runner)
        # Update default runner
        self.assertEqual(self.experiment.runner, new_runner)
        # Update candidate trial runners.
        self.assertEqual(self.experiment.trials[1].runner, new_runner)
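
The trials_by_status and trial_indices_by_status checks assert that every TrialStatus appears as a key even when it holds no trials. The indexing idea in miniature (illustrative only, not Ax's implementation):

# A status -> trial-index map with a bucket for every status, mirroring
# what trial_indices_by_status asserts above.
from enum import Enum

class Status(Enum):  # reduced stand-in for Ax's TrialStatus
    CANDIDATE = 0
    RUNNING = 1

trial_statuses = {0: Status.RUNNING, 1: Status.CANDIDATE}
index = {status: set() for status in Status}  # every status gets a bucket
for trial_index, status in trial_statuses.items():
    index[status].add(trial_index)
assert index[Status.RUNNING] == {0}
assert index[Status.CANDIDATE] == {1}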
Example #4
    def test_immutable_search_space_and_opt_config(self):
        mutable_exp = self._setupBraninExperiment(n=5)
        self.assertFalse(mutable_exp.immutable_search_space_and_opt_config)
        immutable_exp = Experiment(
            name="test4",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            optimization_config=get_branin_optimization_config(),
            runner=SyntheticRunner(),
            properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},
        )
        self.assertTrue(immutable_exp.immutable_search_space_and_opt_config)
        with self.assertRaises(UnsupportedError):
            immutable_exp.optimization_config = get_branin_optimization_config(
            )
        immutable_exp.new_batch_trial()
        with self.assertRaises(UnsupportedError):
            immutable_exp.search_space = get_branin_search_space()

        # Check that passing the property as just a string is processed
        # correctly.
        immutable_exp_2 = Experiment(
            name="test4",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            runner=SyntheticRunner(),
            properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value: True},
        )
        self.assertTrue(immutable_exp_2.immutable_search_space_and_opt_config)
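
The two constructions differ only in passing the enum member versus its .value as the properties key. If Keys subclasses str, as Ax's does, the member and its value hash and compare equally, so both spellings resolve to the same property. A sketch (the value string here is hypothetical):

# Member and .value are interchangeable dict keys when the enum subclasses str.
from enum import Enum

class Keys(str, Enum):
    IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF = "immutable_search_space_and_opt_config"

props = {Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True}
assert props[Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value] is True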
Example #5
def get_multi_type_experiment_with_multi_objective(
    add_trials: bool = False,
) -> MultiTypeExperiment:
    oc = get_branin_multi_objective_optimization_config()
    experiment = MultiTypeExperiment(
        name="test_exp",
        search_space=get_branin_search_space(),
        default_trial_type="type1",
        default_runner=SyntheticRunner(dummy_metadata="dummy1"),
        optimization_config=oc,
    )
    experiment.add_trial_type(
        trial_type="type2", runner=SyntheticRunner(dummy_metadata="dummy2")
    )

    if add_trials:
        generator = get_sobol(experiment.search_space)
        gr = generator.gen(10)
        t1 = experiment.new_batch_trial(generator_run=gr, trial_type="type1")
        t2 = experiment.new_batch_trial(generator_run=gr, trial_type="type2")
        t1.set_status_quo_with_weight(status_quo=t1.arms[0], weight=0.5)
        t2.set_status_quo_with_weight(status_quo=t2.arms[0], weight=0.5)
        t1.run()
        t2.run()

    return experiment
Example #6
def get_branin_experiment_with_multi_objective(
    has_optimization_config: bool = True,
    has_objective_thresholds: bool = False,
    with_batch: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    num_objectives: int = 2,
) -> Experiment:
    exp = Experiment(
        name="branin_test_experiment",
        search_space=get_branin_search_space(
            with_fidelity_parameter=with_fidelity_parameter),
        optimization_config=get_branin_multi_objective_optimization_config(
            has_objective_thresholds=has_objective_thresholds,
            num_objectives=num_objectives,
        ) if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        # Experiment chooses the name "status_quo" by default
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space,
                                    seed=TEST_SOBOL_SEED)
        sobol_run = sobol_generator.gen(n=5)
        exp.new_batch_trial(
            optimize_for_power=with_status_quo).add_generator_run(sobol_run)

    return exp
Example #7
def get_factorial_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_status_quo: bool = False,
) -> Experiment:
    exp = Experiment(
        name="factorial_test_experiment",
        search_space=get_factorial_search_space(),
        optimization_config=OptimizationConfig(
            objective=Objective(metric=get_factorial_metric())
        )
        if has_optimization_config
        else None,
        runner=SyntheticRunner(),
        is_test=True,
        tracking_metrics=[get_factorial_metric("secondary_metric")],
    )

    if with_status_quo:
        exp.status_quo = Arm(
            parameters={
                "factor1": "level11",
                "factor2": "level21",
                "factor3": "level31",
            }
        )

    if with_batch:
        factorial_generator = get_factorial(search_space=exp.search_space)
        factorial_run = factorial_generator.gen(n=-1)
        exp.new_batch_trial(optimize_for_power=with_status_quo).add_generator_run(
            factorial_run
        )

    return exp
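
Note the gen(n=-1) call: in Ax's tests this requests the full factorial design, i.e. every combination of factor levels, rather than a fixed sample size. The enumeration itself is just a cartesian product; a plain-Python sketch with a hypothetical two-factor space:

# Full-factorial enumeration: every combination of levels.
from itertools import product

levels = {
    "factor1": ["level11", "level12"],
    "factor2": ["level21", "level22"],
}
arms = [dict(zip(levels, combo)) for combo in product(*levels.values())]
assert len(arms) == 4  # 2 x 2 combinations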
Example #8
    def testEarlyStoppedBatchTrial(self):
        self.batch.runner = SyntheticRunner()
        self.batch.run()
        self.batch.mark_early_stopped()

        self.assertEqual(self.batch.status, TrialStatus.EARLY_STOPPED)
        self.assertIsNotNone(self.batch.time_completed)
Example #9
    def testFailedBatchTrial(self):
        self.batch.runner = SyntheticRunner()
        self.batch.run()
        self.batch.mark_failed()

        self.assertEqual(self.batch.status, TrialStatus.FAILED)
        self.assertIsNotNone(self.batch.time_completed)
Example #10
def get_trial() -> Trial:
    experiment = get_experiment()
    trial = experiment.new_trial()
    arm = get_arms_from_dict(get_arm_weights1())[0]
    trial.add_arm(arm)
    trial.runner = SyntheticRunner()
    return trial
Example #11
def get_trial() -> Trial:
    experiment = get_experiment()
    trial = experiment.new_trial()
    arm = get_arms()[0]
    trial.add_arm(arm)
    trial.runner = SyntheticRunner()
    return trial
Example #12
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    with_choice_parameter: bool = False,
    search_space: Optional[SearchSpace] = None,
) -> Experiment:
    search_space = search_space or get_branin_search_space(
        with_fidelity_parameter=with_fidelity_parameter,
        with_choice_parameter=with_choice_parameter,
    )
    exp = Experiment(
        name="branin_test_experiment",
        search_space=search_space,
        optimization_config=get_branin_optimization_config()
        if has_optimization_config
        else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial(optimize_for_power=with_status_quo).add_generator_run(
            sobol_run
        )

    return exp
Example #13
def get_trial() -> Trial:
    experiment = get_experiment()
    trial = experiment.new_trial(ttl_seconds=72)
    arm = get_arms_from_dict(get_arm_weights1())[0]
    trial.add_arm(arm)
    trial.runner = SyntheticRunner()
    trial._generation_step_index = 0
    return trial
Example #14
def get_trial() -> Trial:
    experiment = get_experiment()
    trial = experiment.new_trial(ttl_seconds=72)
    arm = get_arms_from_dict(get_arm_weights1())[0]
    trial.add_arm(arm)
    trial.runner = SyntheticRunner()
    # pyre-fixme[8]: Attribute has type `None`; used as `int`.
    trial._generation_step_index = 0
    return trial
Example #15
    def testBasicSetter(self):
        self.batch.runner = SyntheticRunner()
        self.assertIsNotNone(self.batch.runner)

        self.batch.trial_type = None
        self.assertIsNone(self.batch.trial_type)

        # Default experiment only supports None as trial_type
        with self.assertRaises(ValueError):
            self.batch.trial_type = ""
Example #16
def get_batch_trial() -> BatchTrial:
    experiment = get_experiment()
    batch = experiment.new_batch_trial()
    arms = get_arms()
    weights = get_weights()
    batch.add_arms_and_weights(arms=arms, weights=weights, multiplier=0.75)
    batch.mark_arm_abandoned(batch.arms[0].name, "abandoned reason")
    batch.runner = SyntheticRunner()
    batch.set_status_quo_with_weight(status_quo=arms[0], weight=0.5)
    return batch
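
Two weighting mechanisms appear above: multiplier=0.75 scales each supplied weight uniformly, and set_status_quo_with_weight then attaches the status quo at its own weight. The scaling in isolation (a sketch of the documented behavior, not Ax's code):

# Effect of a weight multiplier: each provided weight is scaled uniformly.
weights = [1.0, 2.0, 3.0]
multiplier = 0.75
assert [w * multiplier for w in weights] == [0.75, 1.5, 2.25]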
Example #17
    def testExperimentWithoutName(self):
        exp = Experiment(
            search_space=get_branin_search_space(),
            tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
            runner=SyntheticRunner(),
        )
        self.assertEqual("Experiment(None)", str(exp))
        batch = exp.new_batch_trial()
        batch.add_arms_and_weights(arms=get_branin_arms(n=5, seed=0))
        batch.run()
        self.assertEqual(batch.run_metadata, {"name": "0"})
Example #18
    def testBatchLifecycle(self):
        staging_mock = PropertyMock()
        with patch.object(SyntheticRunner, "staging_required", staging_mock):
            mock_runner = SyntheticRunner()
            staging_mock.return_value = True
            self.batch.runner = mock_runner
            self.batch.run()
            self.assertEqual(self.batch.status, TrialStatus.STAGED)
            self.assertIsNotNone(self.batch.time_staged)
            self.assertTrue(self.batch.status.is_deployed)
            self.assertFalse(self.batch.status.expecting_data)

            # Cannot change arms or runner once run
            with self.assertRaises(ValueError):
                self.batch.add_arms_and_weights(arms=self.arms,
                                                weights=self.weights)

            with self.assertRaises(ValueError):
                self.batch.runner = None

            # Cannot run batch that was already run
            with self.assertRaises(ValueError):
                self.batch.run()

            self.batch.mark_running()
            self.assertEqual(self.batch.status, TrialStatus.RUNNING)
            self.assertIsNotNone(self.batch.time_run_started)
            self.assertTrue(self.batch.status.expecting_data)

            self.batch.complete()
            # Cannot complete that which is already completed
            with self.assertRaises(ValueError):
                self.batch.complete()

            # Verify trial is completed
            self.assertEqual(self.batch.status, TrialStatus.COMPLETED)
            self.assertIsNotNone(self.batch.time_completed)
            self.assertTrue(self.batch.status.is_terminal)

            # Cannot change status after BatchTrial is completed
            with self.assertRaises(ValueError):
                self.batch.mark_staged()

            with self.assertRaises(ValueError):
                self.batch.mark_completed()

            with self.assertRaises(ValueError):
                self.batch.mark_running()

            with self.assertRaises(ValueError):
                self.batch.mark_abandoned()

            with self.assertRaises(ValueError):
                self.batch.mark_failed()
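
Taken together, these assertions trace a one-way lifecycle: a staged batch may start running, a running batch may terminate, and a terminal batch rejects every further transition. An informal state-machine sketch of that contract (not Ax's implementation):

# Informal model of the transitions the test exercises.
ALLOWED = {
    "CANDIDATE": {"STAGED", "RUNNING"},
    "STAGED": {"RUNNING"},
    "RUNNING": {"COMPLETED", "FAILED", "ABANDONED", "EARLY_STOPPED"},
    # terminal states permit no further transitions
}

def transition(current: str, new: str) -> str:
    if new not in ALLOWED.get(current, set()):
        raise ValueError(f"cannot move {current} -> {new}")
    return new

status = transition("CANDIDATE", "STAGED")  # run() with staging required
status = transition(status, "RUNNING")      # mark_running()
status = transition(status, "COMPLETED")    # complete()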
Example #19
    def testBadBehavior(self):
        # Add trial type that already exists
        with self.assertRaises(ValueError):
            self.experiment.add_trial_type("type1", SyntheticRunner())

        # Update runner for non-existent trial type
        with self.assertRaises(ValueError):
            self.experiment.update_runner("type3", SyntheticRunner())

        # Add metric for trial_type that doesn't exist
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metric(
                BraninMetric("m2", ["x1", "x2"]), "type3")

        # Try to remove metric that doesn't exist
        with self.assertRaises(ValueError):
            self.experiment.remove_tracking_metric("m3")

        # Try to change optimization metric to non-primary trial type
        with self.assertRaises(ValueError):
            self.experiment.update_tracking_metric(
                BraninMetric("m1", ["x1", "x2"]), "type2")

        # Update metric definition for trial_type that doesn't exist
        with self.assertRaises(ValueError):
            self.experiment.update_tracking_metric(
                BraninMetric("m2", ["x1", "x2"]), "type3")

        # Try to get runner for trial_type that's not supported
        batch = self.experiment.new_batch_trial()
        batch._trial_type = "type3"  # Force override trial type
        with self.assertRaises(ValueError):
            self.experiment.runner_for_trial(batch)

        # Try making trial with unsupported trial type
        with self.assertRaises(ValueError):
            self.experiment.new_batch_trial(trial_type="type3")

        # Try resetting runners.
        with self.assertRaises(NotImplementedError):
            self.experiment.reset_runners(SyntheticRunner())
Example #20
def get_batch_trial(abandon_arm: bool = True) -> BatchTrial:
    experiment = get_experiment()
    batch = experiment.new_batch_trial()
    arms = get_arms_from_dict(get_arm_weights1())
    weights = get_weights_from_dict(get_arm_weights1())
    batch.add_arms_and_weights(arms=arms, weights=weights, multiplier=0.75)
    if abandon_arm:
        batch.mark_arm_abandoned(batch.arms[0].name, "abandoned reason")
    batch.runner = SyntheticRunner()
    batch.set_status_quo_with_weight(status_quo=arms[0], weight=0.5)
    batch._generation_step_index = 0
    return batch
Example #21
    def testExperimentRunners(self):
        original_runner = SyntheticRunner()
        self.experiment.runner = original_runner
        batch = self.experiment.new_batch_trial()
        batch.run()
        self.assertEqual(batch.runner, original_runner)

        # Simulate a failed run/deployment, in which the runner is attached
        # but the actual run fails, and so the trial remains CANDIDATE.
        candidate_batch = self.experiment.new_batch_trial()
        candidate_batch.run()
        candidate_batch._status = TrialStatus.CANDIDATE

        identifier = {"new_runner": True}
        new_runner = SyntheticRunner(dummy_metadata=identifier)
        self.experiment.reset_runners(new_runner)
        # Don't update trials that have been run.
        self.assertEqual(batch.runner, original_runner)
        # Update default runner
        self.assertEqual(self.experiment.runner, new_runner)
        # Update candidate trial runners.
        self.assertEqual(self.experiment.trials[1].runner, new_runner)
Example #22
    def __init__(
        self,
        problem: BenchmarkProblem,
        total_iterations: int = 20,
        batch_size: int = 1,
    ) -> None:
        super().__init__(
            name=problem.name,
            search_space=problem.search_space,
            runner=SyntheticRunner(),
            optimization_config=problem.optimization_config,
        )
        self.problem = problem
        self.total_iterations = total_iterations
        self.batch_size = batch_size
Example #23
def _benchmark_replication_Dev_API(
    problem: BenchmarkProblem,
    method: GenerationStrategy,
    num_trials: int,
    experiment_name: str,
    batch_size: int = 1,
    raise_all_exceptions: bool = False,
    benchmark_trial: FunctionType = benchmark_trial,
    verbose_logging: bool = True,
    # Number of trials that need to fail for a replication to be considered failed.
    failed_trials_tolerated: int = 5,
    async_benchmark_options: Optional[AsyncBenchmarkOptions] = None,
) -> Tuple[Experiment, List[Exception]]:
    """Run a benchmark replication via the Developer API because the problem was
    set up with Ax classes (likely to allow for additional complexity like
    adding constraints or non-range parameters).
    """
    if async_benchmark_options is not None:
        raise NonRetryableBenchmarkingError(
            "`async_benchmark_options` not supported when using the Dev API."
        )

    exceptions = []
    experiment = Experiment(
        name=experiment_name,
        search_space=problem.search_space,
        optimization_config=problem.optimization_config,
        runner=SyntheticRunner(),
    )
    for trial_index in range(num_trials):
        try:
            gr = method.gen(experiment=experiment, n=batch_size)
            if batch_size == 1:
                trial = experiment.new_trial(generator_run=gr)
            else:
                assert batch_size > 1
                trial = experiment.new_batch_trial(generator_run=gr)
            trial.run()
            # TODO[T94059549]: Rm 3 lines below when attaching data in fetch is fixed.
            data = benchmark_trial(experiment=experiment, trial_index=trial_index)
            if not data.df.empty:
                experiment.attach_data(data=data)
        except Exception as err:  # TODO[T53975770]: test
            if raise_all_exceptions:
                raise
            exceptions.append(err)
        if len(exceptions) > failed_trials_tolerated:
            raise RuntimeError(  # TODO[T53975770]: test
                f"More than {failed_trials_tolerated} failed for {experiment_name}."
            )
    return experiment, exceptions
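
Stripped of the benchmarking bookkeeping, the loop above is the Developer-API ask-tell pattern: generate candidates, wrap them in a trial, run it, and attach the fetched data. A runnable skeleton with the Ax-specific pieces reduced to placeholder callables (all names here are hypothetical):

# Ask-tell skeleton underlying the replication loop above.
def ask() -> dict:
    return {"x1": 0.0, "x2": 0.0}  # stand-in for method.gen(...)

def run_and_fetch(params: dict) -> float:
    return sum(params.values())  # stand-in for trial.run() + benchmark_trial

results = []
for _ in range(3):  # num_trials
    params = ask()  # "ask" for a candidate
    results.append(run_and_fetch(params))  # "tell" the outcome back
assert len(results) == 3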
Example #24
    def test_fetch_as_class(self):
        class MyMetric(Metric):
            @property
            def fetch_multi_group_by_metric(self) -> Type[Metric]:
                return Metric

        m = MyMetric(name="test_metric")
        exp = Experiment(
            name="test",
            search_space=get_branin_search_space(),
            tracking_metrics=[m],
            runner=SyntheticRunner(),
        )
        self.assertEqual(exp._metrics_by_class(), {Metric: [m]})
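
The assertion checks that _metrics_by_class buckets metrics by the class under which they should be fetched; because fetch_multi_group_by_metric points at the base Metric, the MyMetric instance lands under Metric rather than its own type. The grouping idea in isolation, with a pluggable key:

# Grouping objects under a class-valued key. Ax uses a metric-supplied key
# (fetch_multi_group_by_metric), so a subclass can elect the base class.
from collections import defaultdict

def group_by_class(items, key=type):
    groups = defaultdict(list)
    for obj in items:
        groups[key(obj)].append(obj)
    return dict(groups)

assert group_by_class([1, 2, "a"]) == {int: [1, 2], str: ["a"]}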
Example #25
def get_experiment_with_multi_objective() -> Experiment:
    optimization_config = get_multi_objective_optimization_config()

    exp = Experiment(
        name="test_experiment_multi_objective",
        search_space=get_branin_search_space(),
        optimization_config=optimization_config,
        description="test experiment with multi objective",
        runner=SyntheticRunner(),
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )

    return exp
Example #26
    def _setupBraninExperiment(self, n: int) -> Experiment:
        exp = Experiment(
            name="test3",
            search_space=get_branin_search_space(),
            tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
            runner=SyntheticRunner(),
        )
        batch = exp.new_batch_trial()
        batch.add_arms_and_weights(arms=get_branin_arms(n=n, seed=0))
        batch.run()

        batch_2 = exp.new_batch_trial()
        batch_2.add_arms_and_weights(arms=get_branin_arms(n=3 * n, seed=1))
        batch_2.run()
        return exp
Example #27
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_trial: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    with_choice_parameter: bool = False,
    with_str_choice_param: bool = False,
    search_space: Optional[SearchSpace] = None,
    minimize: bool = False,
    named: bool = True,
    with_completed_trial: bool = False,
) -> Experiment:
    search_space = search_space or get_branin_search_space(
        with_fidelity_parameter=with_fidelity_parameter,
        with_choice_parameter=with_choice_parameter,
        with_str_choice_param=with_str_choice_param,
    )
    exp = Experiment(
        name="branin_test_experiment" if named else None,
        search_space=search_space,
        optimization_config=get_branin_optimization_config(
            minimize=minimize) if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial(
            optimize_for_power=with_status_quo).add_generator_run(sobol_run)

    if with_trial or with_completed_trial:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=1)
        trial = exp.new_trial(generator_run=sobol_run)

        if with_completed_trial:
            trial.mark_running(no_runner_required=True)
            exp.attach_data(
                get_branin_data(trials=[trial]))  # Add data for one trial
            trial.mark_completed()

    return exp
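
A note added for this listing: the flags compose, so one call can yield a named experiment with a completed, data-bearing trial. A hypothetical usage, assuming the helper above is importable from Ax's test stubs:

# Hypothetical usage of the stub defined above.
exp = get_branin_experiment(with_completed_trial=True)
assert exp.trials[0].status.is_terminal  # COMPLETED is a terminal status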
Example #28
    def testMTExperimentFlow(self):
        self.assertTrue(self.experiment.supports_trial_type("type1"))
        self.assertTrue(self.experiment.supports_trial_type("type2"))
        self.assertFalse(self.experiment.supports_trial_type(None))

        n = 10
        arms = get_branin_arms(n=n, seed=0)

        b1 = self.experiment.new_batch_trial()
        b1.add_arms_and_weights(arms=arms)
        self.assertEqual(b1.trial_type, "type1")
        b1.run()
        self.assertEqual(b1.run_metadata["dummy_metadata"], "dummy1")

        self.experiment.update_runner("type2",
                                      SyntheticRunner(dummy_metadata="dummy3"))
        b2 = self.experiment.new_batch_trial()
        b2.trial_type = "type2"
        b2.add_arms_and_weights(arms=arms)
        self.assertEqual(b2.trial_type, "type2")
        b2.run()
        self.assertEqual(b2.run_metadata["dummy_metadata"], "dummy3")

        df = self.experiment.fetch_data().df
        for _, row in df.iterrows():
            # Make sure proper metric present for each batch only
            self.assertEqual(row["metric_name"],
                             "m1" if row["trial_index"] == 0 else "m2")

        arm_0_slice = df.loc[df["arm_name"] == "0_0"]
        self.assertNotEqual(
            float(arm_0_slice[df["trial_index"] == 0]["mean"]),
            float(arm_0_slice[df["trial_index"] == 1]["mean"]),
        )
        self.assertEqual(len(df), 2 * n)

        # Set 2 metrics to be equal
        self.experiment.update_tracking_metric(BraninMetric(
            "m2", ["x1", "x2"]),
                                               trial_type="type2")
        df = self.experiment.fetch_data().df
        arm_0_slice = df.loc[df["arm_name"] == "0_0"]
        self.assertAlmostEqual(
            float(arm_0_slice[df["trial_index"] == 0]["mean"]),
            float(arm_0_slice[df["trial_index"] == 1]["mean"]),
            places=10,
        )
Example #29
def get_experiment_with_multi_objective() -> Experiment:
    objective = get_multi_objective()
    outcome_constraints = [get_outcome_constraint()]
    optimization_config = OptimizationConfig(
        objective=objective, outcome_constraints=outcome_constraints)

    exp = Experiment(
        name="test_experiment_multi_objective",
        search_space=get_branin_search_space(),
        optimization_config=optimization_config,
        description="test experiment with multi objective",
        runner=SyntheticRunner(),
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )

    return exp
Example #30
    def testBasic(self) -> None:
        self.assertTrue(self.experiment.is_simple_experiment)
        trial = self.experiment.new_trial()
        with self.assertRaises(NotImplementedError):
            trial.runner = SyntheticRunner()
        with self.assertRaises(NotImplementedError):
            self.experiment.add_tracking_metric(Metric(name="test"))
        with self.assertRaises(NotImplementedError):
            self.experiment.update_tracking_metric(Metric(name="test"))
        self.assertTrue(self.experiment.eval_trial(trial).df.empty)
        batch = self.experiment.new_batch_trial()
        batch.add_arm(Arm(parameters={"x1": 5, "x2": 10}))
        self.assertEqual(self.experiment.eval_trial(batch).df["mean"][0], 15)
        self.experiment.new_batch_trial().add_arm(Arm(parameters={"x1": 15, "x2": 25}))
        self.assertAlmostEqual(self.experiment.eval().df["mean"][1], 40)
        self.assertEqual(batch.fetch_data().df["mean"][0], 15)
        self.assertAlmostEqual(self.experiment.fetch_data().df["mean"][1], 40)
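
The means asserted above (15 for x1=5, x2=10; 40 for x1=15, x2=25) imply that this test's setup evaluates arms by summing the two parameters. A hypothetical evaluation function consistent with those numbers (the actual setup is not shown in the snippet, and the metric name below is invented):

# Hypothetical evaluation function matching the asserted means; SimpleExperiment
# evaluation functions return a metric -> (mean, SEM) mapping.
def sum_evaluation(parameterization):
    value = parameterization["x1"] + parameterization["x2"]
    return {"objective": (value, 0.0)}

assert sum_evaluation({"x1": 5, "x2": 10})["objective"][0] == 15
assert sum_evaluation({"x1": 15, "x2": 25})["objective"][0] == 40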