def testEq(self):
        exp2 = get_multi_type_experiment()

        # Should be equal to start
        self.assertTrue(self.experiment == exp2)

        self.experiment.add_tracking_metric(BraninMetric("m3", ["x2", "x1"]),
                                            trial_type="type1",
                                            canonical_name="m4")

        # Test different set of metrics
        self.assertFalse(self.experiment == exp2)

        exp2.add_tracking_metric(BraninMetric("m3", ["x2", "x1"]),
                                 trial_type="type1",
                                 canonical_name="m5")

        # Test different metric definitions
        self.assertFalse(self.experiment == exp2)

        exp2.update_tracking_metric(BraninMetric("m3", ["x2", "x1"]),
                                    trial_type="type1",
                                    canonical_name="m4")

        # Should be the same
        self.assertTrue(self.experiment == exp2)

        exp2.remove_tracking_metric("m3")
        self.assertFalse(self.experiment == exp2)
Example #2
def get_multi_type_experiment(
    add_trial_type: bool = True, add_trials: bool = False
) -> MultiTypeExperiment:
    oc = OptimizationConfig(Objective(BraninMetric("m1", ["x1", "x2"])))
    experiment = MultiTypeExperiment(
        name="test_exp",
        search_space=get_branin_search_space(),
        default_trial_type="type1",
        default_runner=SyntheticRunner(dummy_metadata="dummy1"),
        optimization_config=oc,
    )
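    # Register a second trial type with its own synthetic runner.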
    experiment.add_trial_type(
        trial_type="type2", runner=SyntheticRunner(dummy_metadata="dummy2")
    )
    # Switch the order of variables so the metric gives different results
    experiment.add_tracking_metric(
        BraninMetric("m2", ["x2", "x1"]), trial_type="type2", canonical_name="m1"
    )

    if add_trials and add_trial_type:
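        # Build one batch trial per trial type from the same Sobol generator run,
        # give each a status quo arm, and run both.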
        generator = get_sobol(experiment.search_space)
        gr = generator.gen(10)
        t1 = experiment.new_batch_trial(generator_run=gr, trial_type="type1")
        t2 = experiment.new_batch_trial(generator_run=gr, trial_type="type2")
        t1.set_status_quo_with_weight(status_quo=t1.arms[0], weight=0.5)
        t2.set_status_quo_with_weight(status_quo=t2.arms[0], weight=0.5)
        t1.run()
        t2.run()

    return experiment
Example #3
    def test_immutable_search_space_and_opt_config(self):
        mutable_exp = self._setupBraninExperiment(n=5)
        self.assertFalse(mutable_exp.immutable_search_space_and_opt_config)
        immutable_exp = Experiment(
            name="test4",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            optimization_config=get_branin_optimization_config(),
            runner=SyntheticRunner(),
            properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},
        )
        self.assertTrue(immutable_exp.immutable_search_space_and_opt_config)
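        # Mutating the optimization config or search space should now raise.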
        with self.assertRaises(UnsupportedError):
            immutable_exp.optimization_config = get_branin_optimization_config()
        immutable_exp.new_batch_trial()
        with self.assertRaises(UnsupportedError):
            immutable_exp.search_space = get_branin_search_space()

        # Check that passing the property as just a string is processed
        # correctly.
        immutable_exp_2 = Experiment(
            name="test4",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            runner=SyntheticRunner(),
            properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value: True},
        )
        self.assertTrue(immutable_exp_2.immutable_search_space_and_opt_config)
Example #4
 def test_transform_optimization_config_with_relative_constraints(self):
     relativize = Relativize(
         search_space=None,
         observation_features=[],
         observation_data=[],
         modelbridge=self.model,
     )
     optimization_config = get_branin_optimization_config()
     optimization_config.outcome_constraints = [
         OutcomeConstraint(
             metric=BraninMetric("b2", ["x2", "x1"]),
             op=ComparisonOp.GEQ,
             bound=-200.0,
             relative=True,
         )
     ]
     new_config = relativize.transform_optimization_config(
         optimization_config=optimization_config,
         modelbridge=None,
         fixed_features=Mock(),
     )
     self.assertEqual(new_config.objective, optimization_config.objective)
     self.assertEqual(
         new_config.outcome_constraints[0].bound,
         optimization_config.outcome_constraints[0].bound,
     )
     self.assertFalse(new_config.outcome_constraints[0].relative)
Example #5
 def testOODStatusQuo(self):
     # An OOD status quo arm without a trial index will raise an error
     experiment = get_branin_experiment()
     experiment.add_tracking_metric(
         BraninMetric(name="m2", param_names=["x1", "x2"]))
     metrics = list(experiment.metrics.values())
     sobol = Models.SOBOL(experiment.search_space)
     a = sobol.gen(5)
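      # Run a batch trial built from five Sobol-generated arms.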
     experiment.new_batch_trial(generator_run=a).run()
     # Experiments with batch trials must specify a trial index
     with self.assertRaises(UnsupportedError):
         compute_pareto_frontier(
             experiment,
             metrics[0],
             metrics[1],
             absolute_metrics=[m.name for m in metrics],
         )
     compute_pareto_frontier(
         experiment,
         metrics[0],
         metrics[1],
         trial_index=0,
         absolute_metrics=[m.name for m in metrics],
     )
     compute_pareto_frontier(
         experiment,
         metrics[0],
         metrics[1],
         data=experiment.fetch_data(),
         absolute_metrics=[m.name for m in metrics],
     )
Example #6
 def setUp(self):
     experiment = get_branin_experiment()
     experiment.add_tracking_metric(
         BraninMetric(name="m2", param_names=["x1", "x2"]))
     sobol = Models.SOBOL(experiment.search_space)
     a = sobol.gen(5)
     experiment.new_batch_trial(generator_run=a).run()
     self.experiment = experiment
     self.metrics = list(experiment.metrics.values())
Example #7
 def testExperimentWithoutName(self):
     exp = Experiment(
         search_space=get_branin_search_space(),
         tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
         runner=SyntheticRunner(),
     )
     self.assertEqual("Experiment(None)", str(exp))
     batch = exp.new_batch_trial()
     batch.add_arms_and_weights(arms=get_branin_arms(n=5, seed=0))
     batch.run()
     self.assertEqual(batch.run_metadata, {"name": "0"})
Example #8
    def testBadBehavior(self):
        # Add trial type that already exists
        with self.assertRaises(ValueError):
            self.experiment.add_trial_type("type1", SyntheticRunner())

        # Update runner for non-existent trial type
        with self.assertRaises(ValueError):
            self.experiment.update_runner("type3", SyntheticRunner())

        # Add metric for trial_type that doesn't exist
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metric(
                BraninMetric("m2", ["x1", "x2"]), "type3")

        # Try to remove metric that doesn't exist
        with self.assertRaises(ValueError):
            self.experiment.remove_tracking_metric("m3")

        # Try to change optimization metric to non-primary trial type
        with self.assertRaises(ValueError):
            self.experiment.update_tracking_metric(
                BraninMetric("m1", ["x1", "x2"]), "type2")

        # Update metric definition for trial_type that doesn't exist
        with self.assertRaises(ValueError):
            self.experiment.update_tracking_metric(
                BraninMetric("m2", ["x1", "x2"]), "type3")

        # Try to get runner for trial_type that's not supported
        batch = self.experiment.new_batch_trial()
        batch._trial_type = "type3"  # Force override trial type
        with self.assertRaises(ValueError):
            self.experiment.runner_for_trial(batch)

        # Try making trial with unsupported trial type
        with self.assertRaises(ValueError):
            self.experiment.new_batch_trial(trial_type="type3")

        # Try resetting runners.
        with self.assertRaises(NotImplementedError):
            self.experiment.reset_runners(SyntheticRunner())
Example #9
    def _setupBraninExperiment(self, n: int) -> Experiment:
        exp = Experiment(
            name="test3",
            search_space=get_branin_search_space(),
            tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
            runner=SyntheticRunner(),
        )
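        # Create and run two batch trials: one with n arms and one with 3 * n arms.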
        batch = exp.new_batch_trial()
        batch.add_arms_and_weights(arms=get_branin_arms(n=n, seed=0))
        batch.run()

        batch_2 = exp.new_batch_trial()
        batch_2.add_arms_and_weights(arms=get_branin_arms(n=3 * n, seed=1))
        batch_2.run()
        return exp
Example #10
    def testMTExperimentFlow(self):
        self.assertTrue(self.experiment.supports_trial_type("type1"))
        self.assertTrue(self.experiment.supports_trial_type("type2"))
        self.assertFalse(self.experiment.supports_trial_type(None))

        n = 10
        arms = get_branin_arms(n=n, seed=0)

        b1 = self.experiment.new_batch_trial()
        b1.add_arms_and_weights(arms=arms)
        self.assertEqual(b1.trial_type, "type1")
        b1.run()
        self.assertEqual(b1.run_metadata["dummy_metadata"], "dummy1")

        self.experiment.update_runner("type2",
                                      SyntheticRunner(dummy_metadata="dummy3"))
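        # Switch the second batch to the non-default trial type.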
        b2 = self.experiment.new_batch_trial()
        b2.trial_type = "type2"
        b2.add_arms_and_weights(arms=arms)
        self.assertEqual(b2.trial_type, "type2")
        b2.run()
        self.assertEqual(b2.run_metadata["dummy_metadata"], "dummy3")

        df = self.experiment.fetch_data().df
        for _, row in df.iterrows():
            # Make sure only the proper metric is present for each batch
            self.assertEqual(row["metric_name"],
                             "m1" if row["trial_index"] == 0 else "m2")

        arm_0_slice = df.loc[df["arm_name"] == "0_0"]
        self.assertNotEqual(
            float(arm_0_slice[df["trial_index"] == 0]["mean"]),
            float(arm_0_slice[df["trial_index"] == 1]["mean"]),
        )
        self.assertEqual(len(df), 2 * n)

        # Set 2 metrics to be equal
        self.experiment.update_tracking_metric(
            BraninMetric("m2", ["x1", "x2"]), trial_type="type2"
        )
        df = self.experiment.fetch_data().df
        arm_0_slice = df.loc[df["arm_name"] == "0_0"]
        self.assertAlmostEqual(
            float(arm_0_slice[df["trial_index"] == 0]["mean"]),
            float(arm_0_slice[df["trial_index"] == 1]["mean"]),
            places=10,
        )
Example #11
    def testFetchTrialsData(self):
        exp = self._setupBraninExperiment(n=5)
        batch_0 = exp.trials[0]
        batch_1 = exp.trials[1]
        batch_0.mark_completed()
        batch_1.mark_completed()
        batch_0_data = exp.fetch_trials_data(trial_indices=[0])
        self.assertEqual(set(batch_0_data.df["trial_index"].values), {0})
        self.assertEqual(
            set(batch_0_data.df["arm_name"].values),
            {a.name for a in batch_0.arms},
        )
        batch_1_data = exp.fetch_trials_data(trial_indices=[1])
        self.assertEqual(set(batch_1_data.df["trial_index"].values), {1})
        self.assertEqual(
            set(batch_1_data.df["arm_name"].values),
            {a.name for a in batch_1.arms},
        )
        self.assertEqual(
            exp.fetch_trials_data(trial_indices=[0, 1]).df.shape[0],
            len(exp.arms_by_name) * 2,
        )

        with self.assertRaisesRegex(ValueError, ".* not associated .*"):
            exp.fetch_trials_data(trial_indices=[2])
        # Try to fetch data when there are only metrics and no attached data.
        exp.remove_tracking_metric(metric_name="branin")  # Remove implemented metric.
        exp.add_tracking_metric(
            BraninMetric(name="branin", param_names=["x1", "x2"])
        )  # Add unimplemented metric.
        self.assertEqual(len(exp.fetch_trials_data(trial_indices=[0]).map_df), 10)
        # Try fetching attached data.
        exp.attach_data(batch_0_data)
        exp.attach_data(batch_1_data)
        self.assertEqual(exp.fetch_trials_data(trial_indices=[0]),
                         batch_0_data)
        self.assertEqual(exp.fetch_trials_data(trial_indices=[1]),
                         batch_1_data)
        self.assertEqual(set(batch_0_data.df["trial_index"].values), {0})
        self.assertEqual(
            set(batch_0_data.df["arm_name"].values),
            {a.name for a in batch_0.arms},
        )
Example #12
 def test_transform_optimization_config_with_non_relative_constraints(self):
     relativize = Relativize(
         search_space=None,
         observation_features=[],
         observation_data=[],
         modelbridge=self.model,
     )
     optimization_config = get_branin_optimization_config()
     optimization_config.outcome_constraints = [
         OutcomeConstraint(
             metric=BraninMetric("b2", ["x2", "x1"]),
             op=ComparisonOp.GEQ,
             bound=-200.0,
             relative=False,
         )
     ]
     with self.assertRaisesRegex(ValueError, "All constraints must be relative"):
         relativize.transform_optimization_config(
             optimization_config=optimization_config,
             modelbridge=None,
             fixed_features=Mock(),
         )
Example #13
 def testLowerBound(self):
     branin_lb = BenchmarkProblem(
         name="constrained_branin",
         fbest=0.397887,
         optimization_config=OptimizationConfig(
             objective=Objective(
                 metric=BraninMetric(name="branin_objective",
                                     param_names=["x1", "x2"],
                                     noise_sd=5.0),
                 minimize=True,
             ),
             outcome_constraints=[
                 OutcomeConstraint(
                     metric=L2NormMetric(
                         name="branin_constraint",
                         param_names=["x1", "x2"],
                         noise_sd=5.0,
                     ),
                     op=ComparisonOp.GEQ,
                     bound=5.0,
                     relative=False,
                 )
             ],
         ),
         search_space=get_branin_search_space(),
     )
     suite = BOBenchmarkingSuite()
     suite.run(
         num_runs=1,
         batch_size=2,
         total_iterations=4,
         bo_strategies=[
             GenerationStrategy(
                 [GenerationStep(model=Models.SOBOL, num_arms=5)])
         ],
         bo_problems=[branin_lb],
     )
     suite.generate_report(include_individual=True)
Example #14
 def testRelativeConstraint(self):
     branin_rel = BenchmarkProblem(
         name="constrained_branin",
         fbest=0.397887,
         optimization_config=OptimizationConfig(
             objective=Objective(
                 metric=BraninMetric(name="branin_objective",
                                     param_names=["x1", "x2"],
                                     noise_sd=5.0),
                 minimize=True,
             ),
             outcome_constraints=[
                 OutcomeConstraint(
                     metric=L2NormMetric(
                         name="branin_constraint",
                         param_names=["x1", "x2"],
                         noise_sd=5.0,
                     ),
                     op=ComparisonOp.LEQ,
                     bound=5.0,
                     relative=True,
                 )
             ],
         ),
         search_space=get_branin_search_space(),
     )
     suite = BOBenchmarkingSuite()
     suite.run(
         num_runs=1,
         total_iterations=5,
         bo_strategies=[
             GenerationStrategy(
                 [GenerationStep(model=Models.SOBOL, num_arms=5)])
         ],
         bo_problems=[branin_rel],
     )
     with self.assertRaises(ValueError):
         suite.generate_report()
Example #15
def get_branin_experiment_with_timestamp_map_metric(
    with_status_quo: bool = False,
    rate: Optional[float] = None,
) -> Experiment:
    exp = Experiment(
        name="branin_with_timestamp_map_metric",
        search_space=get_branin_search_space(),
        optimization_config=OptimizationConfig(objective=Objective(
            metric=BraninTimestampMapMetric(
                name="branin_map", param_names=["x1", "x2"], rate=rate),
            minimize=True,
        )),
        tracking_metrics=[
            BraninMetric(name="branin", param_names=["x1", "x2"])
        ],
        runner=SyntheticRunner(),
        default_data_type=DataType.MAP_DATA,
    )

    if with_status_quo:
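        # Attach a status quo arm at x1=0, x2=0.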
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    return exp
Example #16
    def testStatusQuoSetter(self):
        sq_parameters = self.experiment.status_quo.parameters
        self.experiment.status_quo = None
        self.assertIsNone(self.experiment.status_quo)

        # Verify normal update
        sq_parameters["w"] = 3.5
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(self.experiment.status_quo.parameters["w"], 3.5)
        self.assertEqual(self.experiment.status_quo.name, "status_quo")
        self.assertTrue("status_quo" in self.experiment.arms_by_name)

        # Verify all None values
        self.experiment.status_quo = Arm(
            {n: None for n in sq_parameters.keys()}
        )
        self.assertIsNone(self.experiment.status_quo.parameters["w"])

        # Try extra param
        sq_parameters["a"] = 4
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Try wrong type
        sq_parameters.pop("a")
        sq_parameters["w"] = "hello"
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Verify arms_by_signature and arms_by_name contain only the status quo
        self.assertEqual(len(self.experiment.arms_by_signature), 1)
        self.assertEqual(len(self.experiment.arms_by_name), 1)

        # Change status quo, verify still just 1 arm
        sq_parameters["w"] = 3.6
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(len(self.experiment.arms_by_signature), 1)
        self.assertEqual(len(self.experiment.arms_by_name), 1)

        # Make a batch, add status quo to it, then change exp status quo, verify 2 arms
        batch = self.experiment.new_batch_trial()
        batch.set_status_quo_with_weight(self.experiment.status_quo, 1)
        sq_parameters["w"] = 3.7
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(len(self.experiment.arms_by_signature), 2)
        self.assertEqual(len(self.experiment.arms_by_name), 2)
        self.assertEqual(self.experiment.status_quo.name, "status_quo_e0")
        self.assertTrue("status_quo_e0" in self.experiment.arms_by_name)

        # Try missing param
        sq_parameters.pop("w")
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Actually name the status quo.
        exp = Experiment(
            name="test3",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            runner=SyntheticRunner(),
        )
        batch = exp.new_batch_trial()
        arms = get_branin_arms(n=1, seed=0)
        batch.add_arms_and_weights(arms=arms)
        self.assertIsNone(exp.status_quo)
        exp.status_quo = arms[0]
        self.assertEqual(exp.status_quo.name, "0_0")

        # Try setting sq to existing arm with different name
        with self.assertRaises(ValueError):
            exp.status_quo = Arm(arms[0].parameters, name="new_name")
Example #17
def get_branin_metric(name="branin") -> BraninMetric:
    param_names = ["x1", "x2"]
    return BraninMetric(name=name, param_names=param_names, noise_sd=0.01)
Example #18
        optimization_config: optimization configuration
        search_space: search space, on which this problem is defined
    """

    name: str
    fbest: float
    optimization_config: OptimizationConfig
    search_space: SearchSpace


# Branin problems
branin = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmin,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=BraninMetric(
            name="branin_objective", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=True,
    )),
    search_space=get_branin_search_space(),
)

branin_max = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmax,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=NegativeBraninMetric(
            name="neg_branin", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=False,
    )),
    search_space=get_branin_search_space(),
)
Example #19
    def testObservedParetoFrontiers(self):
        experiment = get_branin_experiment(
            with_batch=True, has_optimization_config=False, with_status_quo=True
        )

        # Optimization config is not optional
        with self.assertRaises(ValueError):
            get_observed_pareto_frontiers(experiment=experiment, data=Data())

        metrics = [
            BraninMetric(name="m1", param_names=["x1", "x2"], lower_is_better=True),
            NegativeBraninMetric(
                name="m2", param_names=["x1", "x2"], lower_is_better=True
            ),
            BraninMetric(name="m3", param_names=["x1", "x2"], lower_is_better=True),
        ]
        bounds = [0, -100, 0]
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metric,
                bound=bounds[i],
                relative=True,
                op=ComparisonOp.LEQ,
            )
            for i, metric in enumerate(metrics)
        ]
        objective = MultiObjective(metrics=metrics, minimize=True)
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=objective,
            objective_thresholds=objective_thresholds,
        )
        experiment.optimization_config = optimization_config
        experiment.trials[0].run()

        # For the check below, compute which arms are better than SQ
        df = experiment.fetch_data().df
        df["sem"] = np.nan
        data = Data(df)
        sq_val = df[(df["arm_name"] == "status_quo") & (df["metric_name"] == "m1")][
            "mean"
        ].values[0]
        pareto_arms = sorted(
            df[(df["mean"] <= sq_val) & (df["metric_name"] == "m1")]["arm_name"]
            .unique()
            .tolist()
        )

        pfrs = get_observed_pareto_frontiers(experiment=experiment, data=data)
        # We have all pairs of metrics
        self.assertEqual(len(pfrs), 3)
        true_pairs = [("m1", "m2"), ("m1", "m3"), ("m2", "m3")]
        for i, pfr in enumerate(pfrs):
            self.assertEqual(pfr.primary_metric, true_pairs[i][0])
            self.assertEqual(pfr.secondary_metric, true_pairs[i][1])
            self.assertEqual(pfr.absolute_metrics, [])
            self.assertEqual(list(pfr.means.keys()), ["m1", "m2", "m3"])
            self.assertEqual(len(pfr.means["m1"]), len(pareto_arms))
            self.assertTrue(np.isnan(pfr.sems["m1"]).all())
            self.assertEqual(len(pfr.arm_names), len(pareto_arms))
            arm_idx = np.argsort(pfr.arm_names)
            for i, idx in enumerate(arm_idx):
                name = pareto_arms[i]
                self.assertEqual(pfr.arm_names[idx], name)
                self.assertEqual(
                    pfr.param_dicts[idx], experiment.arms_by_name[name].parameters
                )
Example #20
    def testFetchAndStoreData(self):
        n = 10
        exp = self._setupBraninExperiment(n)
        batch = exp.trials[0]

        # Test fetch data
        batch_data = batch.fetch_data()
        self.assertEqual(len(batch_data.df), n)

        exp_data = exp.fetch_data()
        exp_data2 = exp.metrics["b"].fetch_experiment_data(exp)
        self.assertEqual(len(exp_data2.df), 4 * n)
        self.assertEqual(len(exp_data.df), 4 * n)
        self.assertEqual(len(exp.arms_by_name), 4 * n)

        # Verify data lookup is empty
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), 0)

        # Test local storage
        t1 = exp.attach_data(batch_data)
        t2 = exp.attach_data(exp_data)

        full_dict = exp.data_by_trial
        self.assertEqual(len(full_dict), 2)  # data for 2 trials
        self.assertEqual(len(full_dict[0]), 2)  # 2 data objs for batch 0

        # Test retrieving original batch 0 data
        self.assertEqual(len(exp.lookup_data_for_ts(t1).df), n)
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), n)

        # Test retrieving full exp data
        self.assertEqual(len(exp.lookup_data_for_ts(t2).df), 4 * n)

        with self.assertRaisesRegex(ValueError, ".* for metric"):
            exp.attach_data(batch_data, combine_with_last_data=True)

        new_data = Data(
            df=pd.DataFrame.from_records(
                [
                    {
                        "arm_name": "0_0",
                        "metric_name": "z",
                        "mean": 3,
                        "sem": 0,
                        "trial_index": 0,
                    }
                ]
            )
        )
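        # Merge the new metric row into the most recently attached data for trial 0.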
        t3 = exp.attach_data(new_data, combine_with_last_data=True)
        self.assertEqual(len(full_dict[0]), 3)  # 3 data objs for batch 0 now
        self.assertIn("z", exp.lookup_data_for_ts(t3).df["metric_name"].tolist())

        # Verify we don't get the data if the trial is abandoned
        batch._status = TrialStatus.ABANDONED
        self.assertEqual(len(batch.fetch_data().df), 0)
        self.assertEqual(len(exp.fetch_data().df), 3 * n)

        # For `CANDIDATE` trials, we append attached data to fetched data,
        # so the attached data row with metric name "z" should appear in fetched
        # data.
        batch._status = TrialStatus.CANDIDATE
        self.assertEqual(len(batch.fetch_data().df), n + 1)
        # n arms in trial #0, 3 * n arms in trial #1
        self.assertEqual(len(exp.fetch_data().df), 4 * n + 1)
        metrics_in_data = set(batch.fetch_data().df["metric_name"].values)
        self.assertEqual(metrics_in_data, {"b", "z"})

        # Verify we do get the stored data if there is an unimplemented metric.
        del exp._data_by_trial[0][t3]  # Remove attached data for nonexistent metric.
        exp.remove_tracking_metric(metric_name="b")  # Remove implemented metric.
        exp.add_tracking_metric(Metric(name="dummy"))  # Add unimplemented metric.
        batch._status = TrialStatus.RUNNING
        # Data should be getting looked up now.
        self.assertEqual(batch.fetch_data(), exp.lookup_data_for_ts(t1))
        self.assertEqual(exp.fetch_data(), exp.lookup_data_for_ts(t2))
        metrics_in_data = set(batch.fetch_data().df["metric_name"].values)
        # Data for metric "z" should no longer be present since we removed it.
        self.assertEqual(metrics_in_data, {"b"})

        # Check that error will be raised if dummy and implemented metrics are
        # fetched at once.
        with self.assertRaisesRegex(ValueError, "Unexpected combination"):
            exp.fetch_data(
                [BraninMetric(name="b", param_names=["x1", "x2"]), Metric(name="m")]
            )
Example #21

### Branin problem, D=100 and sensitivity analysis

# The original x1 and x2 have different bounds, so we create two blocks of 50
# parameters, one block with each set of bounds, and place the relevant
# parameters in those blocks.

branin_100 = BenchmarkProblem(
    name="Branin, D=100",
    optimal_value=0.397887,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=BraninMetric(
                name="objective", param_names=["x19", "x64"], noise_sd=0.0
            ),
            minimize=True,
        )
    ),
    search_space=SearchSpace(
        parameters=[  # pyre-ignore
            RangeParameter(
                name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=-5.0, upper=10.0
            )
            for i in range(50)
        ]
        + [
            RangeParameter(
                name=f"x{i + 50}",
                parameter_type=ParameterType.FLOAT,