Code Example #1
    def test_MTGP(self):
        """Tests MTGP instantiation."""
        # Test Multi-type MTGP
        exp = get_multi_type_experiment(add_trials=True)
        mtgp = get_MTGP(experiment=exp, data=exp.fetch_data())
        self.assertIsInstance(mtgp, TorchModelBridge)

        # Test Single-type MTGP
        exp = get_branin_experiment()
        # Check that factory generates a valid sobol modelbridge.
        sobol = get_sobol(search_space=exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(5):
            sobol_run = sobol.gen(n=1)
            t = exp.new_batch_trial().add_generator_run(sobol_run)
            t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
            t.run().mark_completed()
        mtgp = get_MTGP(experiment=exp, data=exp.fetch_data(), trial_index=0)
        self.assertIsInstance(mtgp, TorchModelBridge)
        # mtgp_run = mtgp.gen(
        #     n=1
        # )  # TODO[T110948251]: This is broken at the ChoiceEncode level

        with self.assertRaises(ValueError):
            get_MTGP(experiment=exp, data=exp.fetch_data(), trial_index=9)

        exp = get_branin_experiment()
        sobol = get_sobol(search_space=exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        sobol_run = sobol.gen(n=1)
        t = exp.new_batch_trial().add_generator_run(sobol_run)
        t.run().mark_completed()

        with self.assertRaises(ValueError):
            get_MTGP(experiment=exp, data=exp.fetch_data(), trial_index=0)
Code Example #2
 def test_model_kwargs(self):
     """Tests that model kwargs are passed correctly."""
     exp = get_branin_experiment()
     sobol = get_sobol(
         search_space=exp.search_space, init_position=2, scramble=False, seed=239
     )
     self.assertIsInstance(sobol, RandomModelBridge)
     for _ in range(5):
         sobol_run = sobol.gen(1)
         exp.new_batch_trial().add_generator_run(sobol_run).run().mark_completed()
     with self.assertRaises(TypeError):
         get_sobol(search_space=exp.search_space, nonexistent=True)
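
The kwargs exercised in Code Example #2 can also be used directly, outside a test. Below is a minimal standalone sketch, assuming the usual Ax import paths (ax.modelbridge.factory.get_sobol and the ax.utils.testing.core_stubs test helpers); the engine settings mirror the test above, and the final call demonstrates that unrecognized kwargs raise a TypeError rather than being silently ignored.

# Minimal sketch of passing Sobol engine settings through the factory.
# Import paths are the usual Ax ones; adjust for your Ax version.
from ax.modelbridge.factory import get_sobol
from ax.utils.testing.core_stubs import get_branin_experiment

exp = get_branin_experiment()

# seed, scramble, and init_position are forwarded to the Sobol generator.
sobol = get_sobol(
    search_space=exp.search_space,
    init_position=2,  # start partway into the Sobol sequence
    scramble=False,  # draw the unscrambled sequence
    seed=239,  # fixed seed for reproducibility
)
for arm in sobol.gen(n=3).arms:
    print(arm.parameters)  # e.g. {"x1": ..., "x2": ...}

# Unknown kwargs are rejected with a TypeError, as the test above asserts.
try:
    get_sobol(search_space=exp.search_space, nonexistent=True)
except TypeError as err:
    print(f"rejected: {err}")
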
Code Example #3
File: core_stubs.py Project: Balandat/Ax
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_trial: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    with_choice_parameter: bool = False,
    with_str_choice_param: bool = False,
    search_space: Optional[SearchSpace] = None,
    minimize: bool = False,
    named: bool = True,
    with_completed_trial: bool = False,
) -> Experiment:
    search_space = search_space or get_branin_search_space(
        with_fidelity_parameter=with_fidelity_parameter,
        with_choice_parameter=with_choice_parameter,
        with_str_choice_param=with_str_choice_param,
    )
    exp = Experiment(
        name="branin_test_experiment" if named else None,
        search_space=search_space,
        optimization_config=get_branin_optimization_config(
            minimize=minimize) if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial(
            optimize_for_power=with_status_quo).add_generator_run(sobol_run)

    if with_trial or with_completed_trial:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=1)
        trial = exp.new_trial(generator_run=sobol_run)

        if with_completed_trial:
            trial.mark_running(no_runner_required=True)
            exp.attach_data(
                get_branin_data(trials=[trial]))  # Add data for one trial
            trial.mark_completed()

    return exp
Code Example #4
File: core_stubs.py Project: Balandat/Ax
def get_branin_experiment_with_multi_objective(
    has_optimization_config: bool = True,
    has_objective_thresholds: bool = False,
    with_batch: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    num_objectives: int = 2,
) -> Experiment:
    exp = Experiment(
        name="branin_test_experiment",
        search_space=get_branin_search_space(
            with_fidelity_parameter=with_fidelity_parameter),
        optimization_config=get_branin_multi_objective_optimization_config(
            has_objective_thresholds=has_objective_thresholds,
            num_objectives=num_objectives,
        ) if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        # Experiment chooses the name "status_quo" by default
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space,
                                    seed=TEST_SOBOL_SEED)
        sobol_run = sobol_generator.gen(n=5)
        exp.new_batch_trial(
            optimize_for_power=with_status_quo).add_generator_run(sobol_run)

    return exp
Code Example #5
File: core_stubs.py Project: proteanblank/Ax
def get_multi_type_experiment_with_multi_objective(
    add_trials: bool = False,
) -> MultiTypeExperiment:
    oc = get_branin_multi_objective_optimization_config()
    experiment = MultiTypeExperiment(
        name="test_exp",
        search_space=get_branin_search_space(),
        default_trial_type="type1",
        default_runner=SyntheticRunner(dummy_metadata="dummy1"),
        optimization_config=oc,
    )
    experiment.add_trial_type(
        trial_type="type2", runner=SyntheticRunner(dummy_metadata="dummy2")
    )

    if add_trials:
        generator = get_sobol(experiment.search_space)
        gr = generator.gen(10)
        t1 = experiment.new_batch_trial(generator_run=gr, trial_type="type1")
        t2 = experiment.new_batch_trial(generator_run=gr, trial_type="type2")
        t1.set_status_quo_with_weight(status_quo=t1.arms[0], weight=0.5)
        t2.set_status_quo_with_weight(status_quo=t2.arms[0], weight=0.5)
        t1.run()
        t2.run()

    return experiment
Code Example #6
File: core_stubs.py Project: tangzhenyu/ax
def get_multi_type_experiment(
    add_trial_type: bool = True, add_trials: bool = False
) -> MultiTypeExperiment:
    oc = OptimizationConfig(Objective(BraninMetric("m1", ["x1", "x2"])))
    experiment = MultiTypeExperiment(
        name="test_exp",
        search_space=get_branin_search_space(),
        default_trial_type="type1",
        default_runner=SyntheticRunner(dummy_metadata="dummy1"),
        optimization_config=oc,
    )
    experiment.add_trial_type(
        trial_type="type2", runner=SyntheticRunner(dummy_metadata="dummy2")
    )
    # Switch the order of variables so metric gives different results
    experiment.add_tracking_metric(
        BraninMetric("m2", ["x2", "x1"]), trial_type="type2", canonical_name="m1"
    )

    if add_trials and add_trial_type:
        generator = get_sobol(experiment.search_space)
        gr = generator.gen(10)
        t1 = experiment.new_batch_trial(generator_run=gr, trial_type="type1")
        t2 = experiment.new_batch_trial(generator_run=gr, trial_type="type2")
        t1.set_status_quo_with_weight(status_quo=t1.arms[0], weight=0.5)
        t2.set_status_quo_with_weight(status_quo=t2.arms[0], weight=0.5)
        t1.run()
        t2.run()

    return experiment
Code Example #7
File: core_stubs.py Project: tangzhenyu/ax
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    with_choice_parameter: bool = False,
    search_space: Optional[SearchSpace] = None,
) -> Experiment:
    search_space = search_space or get_branin_search_space(
        with_fidelity_parameter=with_fidelity_parameter,
        with_choice_parameter=with_choice_parameter,
    )
    exp = Experiment(
        name="branin_test_experiment",
        search_space=search_space,
        optimization_config=get_branin_optimization_config()
        if has_optimization_config
        else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial(optimize_for_power=with_status_quo).add_generator_run(
            sobol_run
        )

    return exp
Code Example #8
File: test_factory.py Project: proteanblank/Ax
 def test_MOO_with_more_outcomes_than_thresholds(self):
     experiment = get_branin_experiment_with_multi_objective(
         has_optimization_config=False)
     metric_c = Metric(name="c", lower_is_better=False)
     metric_a = Metric(name="a", lower_is_better=False)
     objective_thresholds = [
         ObjectiveThreshold(
             metric=metric_c,
             bound=2.0,
             relative=False,
         ),
         ObjectiveThreshold(
             metric=metric_a,
             bound=1.0,
             relative=False,
         ),
     ]
     experiment.optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(objectives=[
             Objective(metric=metric_a),
             Objective(metric=metric_c),
         ]),
         objective_thresholds=objective_thresholds,
     )
     experiment.add_tracking_metric(Metric(name="b", lower_is_better=False))
     sobol = get_sobol(search_space=experiment.search_space)
     sobol_run = sobol.gen(1)
     experiment.new_batch_trial().add_generator_run(
         sobol_run).run().mark_completed()
     data = Data(
         pd.DataFrame(
             data={
                 "arm_name": ["0_0", "0_0", "0_0"],
                 "metric_name": ["a", "b", "c"],
                 "mean": [1.0, 2.0, 3.0],
                 "trial_index": [0, 0, 0],
                 "sem": [0, 0, 0],
             }))
     test_names_to_fns = {
         "MOO_NEHVI": get_MOO_NEHVI,
         "MOO_EHVI": get_MOO_NEHVI,
         "MOO_PAREGO": get_MOO_PAREGO,
         "MOO_RS": get_MOO_RS,
     }
     for test_name, factory_fn in test_names_to_fns.items():
         with self.subTest(test_name):
             moo_model = factory_fn(
                 experiment=experiment,
                 data=data,
             )
             moo_gr = moo_model.gen(n=1)
             obj_t = moo_gr.gen_metadata["objective_thresholds"]
             self.assertEqual(obj_t[0], objective_thresholds[1])
             self.assertEqual(obj_t[1], objective_thresholds[0])
             self.assertEqual(len(obj_t), 2)
Code Example #9
def tune_fcn(
    n_sweeps,
    time_suffix,
    dataset_name,
    dataset,
    use_gpu,
    output_dir,
    model_seed,
    params_seed,
    verbose,
    skip_sweeps=None,
):
    n_head_units = RangeParameter(name="n_head_units", parameter_type=ParameterType.INT, lower=8, upper=10)
    n_tail_units = RangeParameter(name="n_tail_units", parameter_type=ParameterType.INT, lower=7, upper=9)
    order_constraint = OrderConstraint(
        lower_parameter=n_tail_units,
        upper_parameter=n_head_units,
    )

    search_space = SearchSpace(
        parameters=[
            n_head_units,
            n_tail_units,
            RangeParameter(name="n_head_layers", parameter_type=ParameterType.INT, lower=1, upper=2),
            RangeParameter(name="n_tail_layers", parameter_type=ParameterType.INT, lower=1, upper=4),
            ChoiceParameter(name="dropout", parameter_type=ParameterType.FLOAT, values=[0.0, 0.1, 0.2, 0.3]),
            RangeParameter(name="learning_rate", parameter_type=ParameterType.FLOAT, lower=1e-4, upper=1e-2, log_scale=True),
        ],
        parameter_constraints=[order_constraint],
    )

    sobol = get_sobol(search_space=search_space, seed=params_seed)
    sweeps = sobol.gen(n=n_sweeps).arms
    if skip_sweeps is not None:
        sweeps = sweeps[skip_sweeps:]

    for i, sweep in enumerate(sweeps):
        train_fcn(
            experiment_name="%s_%d_%s" % (dataset_name, i, time_suffix),
            dataset=dataset,
            batch_size=1024,
            device="cuda" if use_gpu else "cpu",
            report_frequency=100,
            epochs=float("inf"),
            output_dir=output_dir,
            model_seed=model_seed,
            verbose=verbose,
            **sweep.parameters,
        )
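
tune_fcn above, like the tune_tab_transformer, tune_tabnet, and tune_catboost functions in Code Examples #12, #13, and #18 below, follows one pattern: build a SearchSpace, draw the entire sweep from Sobol in a single gen() call, then splat each arm's parameter dict into the training function. Here is a self-contained sketch of that pattern; the train() stub and its parameter names are illustrative placeholders, not part of the original code, and the imports assume Ax's top-level exports.

# Sketch of the sweep pattern shared by the tune_* functions.
# train() and its parameter names are hypothetical placeholders.
from ax import ParameterType, RangeParameter, SearchSpace
from ax.modelbridge.factory import get_sobol


def train(learning_rate, n_layers):
    print(f"training with lr={learning_rate:.5f}, n_layers={n_layers}")


search_space = SearchSpace(
    parameters=[
        RangeParameter(
            name="learning_rate",
            parameter_type=ParameterType.FLOAT,
            lower=1e-4,
            upper=1e-2,
            log_scale=True,
        ),
        RangeParameter(name="n_layers", parameter_type=ParameterType.INT, lower=1, upper=4),
    ]
)

# With a fixed seed the sequence is deterministic, which is what makes
# resuming via skip_sweeps (slicing off already-run arms) safe.
sobol = get_sobol(search_space=search_space, seed=0)
for arm in sobol.gen(n=8).arms:
    train(**arm.parameters)
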
Code Example #10
 def test_sobol_GPEI(self):
     """Tests sobol + GPEI instantiation."""
     exp = get_branin_experiment()
     # Check that factory generates a valid sobol modelbridge.
     sobol = get_sobol(search_space=exp.search_space)
     self.assertIsInstance(sobol, RandomModelBridge)
     for _ in range(5):
         sobol_run = sobol.gen(n=1)
         exp.new_batch_trial().add_generator_run(sobol_run).run()
     # Check that factory generates a valid GP+EI modelbridge.
     exp.optimization_config = get_branin_optimization_config()
     gpei = get_GPEI(experiment=exp, data=exp.fetch_data())
     self.assertIsInstance(gpei, TorchModelBridge)
     gpei = get_GPEI(experiment=exp,
                     data=exp.fetch_data(),
                     search_space=exp.search_space)
     self.assertIsInstance(gpei, TorchModelBridge)
Code Example #11
File: test_factory.py Project: jlin27/Ax
    def test_MTGP(self):
        """Tests MTGP instantiation."""
        # Test Multi-type MTGP
        exp = get_multi_type_experiment(add_trials=True)
        mtgp = get_MTGP(experiment=exp, data=exp.fetch_data())
        self.assertIsInstance(mtgp, TorchModelBridge)

        # Test Single-type MTGP
        exp = get_branin_experiment()
        # Check that factory generates a valid sobol modelbridge.
        sobol = get_sobol(search_space=exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(5):
            sobol_run = sobol.gen(n=1)
            exp.new_batch_trial().add_generator_run(sobol_run).run()
        mtgp = get_MTGP(experiment=exp, data=exp.fetch_data())
        self.assertIsInstance(mtgp, TorchModelBridge)
Code Example #12
def tune_tab_transformer(
    n_sweeps,
    time_suffix,
    dataset_name,
    dataset,
    use_gpu,
    output_dir,
    model_seed,
    params_seed,
    verbose,
    skip_sweeps=None,
):
    search_space = SearchSpace(
        parameters=[
            ChoiceParameter(name="d_model", parameter_type=ParameterType.INT, values=[64, 128]),
            ChoiceParameter(name="n_tokens", parameter_type=ParameterType.INT, values=[8, 16]),
            ChoiceParameter(name="n_transformers", parameter_type=ParameterType.INT, values=[3, 5]),
            ChoiceParameter(name="dim_ff_factor", parameter_type=ParameterType.INT, values=[2, 4]),
            ChoiceParameter(name="dropout", parameter_type=ParameterType.FLOAT, values=[0.0, 0.1, 0.2, 0.3]),
            ChoiceParameter(name="mask", parameter_type=ParameterType.STRING, values=["full", "tree"]),
            ChoiceParameter(name="attention_function", parameter_type=ParameterType.STRING, values=["softmax", "entmax"]),
        ]
    )

    sobol = get_sobol(search_space=search_space, seed=params_seed)
    sweeps = sobol.gen(n=n_sweeps).arms
    if skip_sweeps is not None:
        sweeps = sweeps[skip_sweeps:]

    for i, sweep in enumerate(sweeps):
        train_tab_transformer(
            n_heads=1,
            experiment_name="%s_%d_%s" % (dataset_name, i, time_suffix),
            dataset=dataset,
            batch_size=1024,
            device="cuda" if use_gpu else "cpu",
            report_frequency=100,
            epochs=float("inf"),
            output_dir=output_dir,
            model_seed=model_seed,
            verbose=verbose,
            **sweep.parameters,
        )
Code Example #13
def tune_tabnet(
    n_sweeps,
    time_suffix,
    dataset_name,
    dataset,
    use_gpu,
    output_dir,
    model_seed,
    params_seed,
    verbose,
    skip_sweeps=None,
):
    search_space = SearchSpace(
        parameters=[
            ChoiceParameter(name="n_d", parameter_type=ParameterType.INT, values=[8, 16, 32, 64]),
            RangeParameter(name="n_steps", parameter_type=ParameterType.INT, lower=3, upper=10),
            RangeParameter(name="gamma", parameter_type=ParameterType.FLOAT, lower=1, upper=2),
            RangeParameter(name="n_independent", parameter_type=ParameterType.INT, lower=1, upper=5),
            RangeParameter(name="n_shared", parameter_type=ParameterType.INT, lower=1, upper=5),
            RangeParameter(name="learning_rate", parameter_type=ParameterType.FLOAT, lower=1e-3, upper=2e-2, log_scale=True),
            RangeParameter(name="lambda_sparse", parameter_type=ParameterType.FLOAT, lower=1e-5, upper=1e-3, log_scale=True),
            ChoiceParameter(name="mask_type", parameter_type=ParameterType.STRING, values=["sparsemax", "entmax"]),
        ]
    )

    sobol = get_sobol(search_space=search_space, seed=params_seed)
    sweeps = sobol.gen(n=n_sweeps).arms
    if skip_sweeps is not None:
        sweeps = sweeps[skip_sweeps:]

    for i, sweep in enumerate(sweeps):
        train_tabnet(
            experiment_name="%s_%d_%s" % (dataset_name, i, time_suffix),
            dataset=dataset,
            batch_size=1024,
            device="cuda" if use_gpu else "cpu",
            epochs=15,
            patience=5,
            output_dir=output_dir,
            model_seed=model_seed,
            verbose=int(verbose),
            **sweep.parameters,
        )
Code Example #14
    def test_status_quo_for_non_monolithic_data(self):
        exp = get_branin_experiment_with_multi_objective(with_status_quo=True)
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=5)
        exp.new_batch_trial(sobol_run).set_status_quo_and_optimize_power(
            status_quo=exp.status_quo).run()

        # create data where metrics vary in start and end times
        data = get_non_monolithic_branin_moo_data()

        bridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            experiment=exp,
            data=data,
            transforms=[],
        )
        self.assertEqual(bridge.status_quo.arm_name, "status_quo")
Code Example #15
File: sparse.py Project: BCJuan/SpArSeMod
    def run_sparse(self):
        sparse_exp = SparseExperiment(self.epochs1, **self.__dict__)

        self.exp, self.data = sparse_exp.create_load_experiment()

        sobol = get_sobol(self.exp.search_space)
        sobol = self.run_model(self.r1, sobol, model_type="random")

        self.exp.optimization_config.objective.metrics[0].epochs = self.epochs2
        if self.arc:
            botorch = get_botorch_arc(experiment=self.exp, data=self.data)
        else:
            botorch = get_botorch(experiment=self.exp, data=self.data)
        botorch = self.run_model(self.r2, botorch, model_type="bo")

        if self.morphisms:
            self.pareto_arms = clean_models_return_pareto(
                self.data, self.models_path)
            self.develop_morphisms(botorch)
Code Example #16
File: core_stubs.py Project: Balandat/Ax
def get_branin_with_multi_task(with_multi_objective: bool = False):
    exp = Experiment(
        name="branin_test_experiment",
        search_space=get_branin_search_space(),
        optimization_config=get_branin_multi_objective_optimization_config(
            has_objective_thresholds=True)
        if with_multi_objective else get_branin_optimization_config(),
        runner=SyntheticRunner(),
        is_test=True,
    )

    exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0}, name="status_quo")

    sobol_generator = get_sobol(search_space=exp.search_space,
                                seed=TEST_SOBOL_SEED)
    sobol_run = sobol_generator.gen(n=5)
    exp.new_batch_trial(optimize_for_power=True).add_generator_run(sobol_run)
    not_none(exp.trials.get(0)).run()
    exp.new_batch_trial(optimize_for_power=True).add_generator_run(sobol_run)
    not_none(exp.trials.get(1)).run()

    return exp
Code Example #17
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_status_quo: bool = False,
) -> Experiment:
    exp = Experiment(
        name="branin_test_experiment",
        search_space=get_branin_search_space(),
        optimization_config=get_branin_optimization_config()
        if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0, "x2": 0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial().add_generator_run(sobol_run)

    return exp
Code Example #18
def tune_catboost(
    n_sweeps,
    time_suffix,
    dataset_name,
    dataset,
    use_gpu,
    output_dir,
    model_seed,
    params_seed,
    verbose,
    skip_sweeps=None,
):
    search_space = SearchSpace(parameters=[
        RangeParameter(name="learning_rate", parameter_type=ParameterType.FLOAT, lower=np.exp(-5), upper=1.0, log_scale=True),
        RangeParameter(name="l2_leaf_reg", parameter_type=ParameterType.FLOAT, lower=1, upper=10, log_scale=True),
        RangeParameter(name="subsample", parameter_type=ParameterType.FLOAT, lower=0, upper=1),
        RangeParameter(name="leaf_estimation_iterations", parameter_type=ParameterType.INT, lower=1, upper=10),
        RangeParameter(name="random_strength", parameter_type=ParameterType.INT, lower=1, upper=20),
    ])

    sobol = get_sobol(search_space=search_space, seed=params_seed)
    sweeps = sobol.gen(n=n_sweeps).arms
    if skip_sweeps is not None:
        sweeps = sweeps[skip_sweeps:]

    for i, sweep in enumerate(sweeps):
        train_catboost(
            max_trees=2048,
            experiment_name="%s_%d_%s" % (dataset_name, i, time_suffix),
            dataset=dataset,
            device="GPU" if use_gpu else "CPU",
            output_dir=output_dir,
            model_seed=model_seed,
            verbose=verbose,
            report_frequency=100,
            **sweep.parameters,
        )
Code Example #19
File: test_factory.py Project: fatihbaltaci/Ax
    def test_MTGP(self):
        """Tests MTGP instantiation."""
        # Test Multi-type MTGP
        exp = get_multi_type_experiment(add_trials=True)
        mtgp = get_MTGP(experiment=exp, data=exp.fetch_data())
        self.assertIsInstance(mtgp, TorchModelBridge)

        # Test Single-type MTGP
        exp = get_branin_experiment()
        # Check that factory generates a valid sobol modelbridge.
        sobol = get_sobol(search_space=exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(5):
            sobol_run = sobol.gen(n=1)
            exp.new_batch_trial().add_generator_run(sobol_run).run()
        mtgp = get_MTGP(experiment=exp,
                        is_multi_type=False,
                        data=exp.fetch_data())
        self.assertIsInstance(mtgp, TorchModelBridge)

        # Test wrong call of Multi-type MTGP. The type of the input experiment
        # should be MultiTypeExperiment.
        with self.assertRaises(ValueError):
            get_MTGP(experiment=exp, is_multi_type=True, data=exp.fetch_data())
Code Example #20
File: core_stubs.py Project: tangzhenyu/ax
def get_simple_experiment_with_batch_trial() -> SimpleExperiment:
    experiment = get_simple_experiment()
    generator = get_sobol(experiment.search_space)
    generator_run = generator.gen(10)
    experiment.new_batch_trial(generator_run=generator_run)
    return experiment
Code Example #21
    def test_infer_objective_thresholds(self, _, cuda=False):
        # lightweight test
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            with_status_quo=True,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        data = exp.fetch_data()
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=Cont_X_trans + Y_trans,
            torch_device=torch.device("cuda" if cuda else "cpu"),
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={"x1": 0.0})
        search_space = exp.search_space.clone()
        param_constraints = [
            ParameterConstraint(constraint_dict={"x1": 1.0}, bound=10.0)
        ]
        outcome_constraints = [
            OutcomeConstraint(
                metric=exp.metrics["branin_a"],
                op=ComparisonOp.GEQ,
                bound=-40.0,
                relative=False,
            )
        ]
        search_space.add_parameter_constraints(param_constraints)
        exp.optimization_config.outcome_constraints = outcome_constraints
        oc = exp.optimization_config.clone()
        oc.objective._objectives[0].minimize = True
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=oc,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.multi_objective_torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            mock_get_transformed_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_gen_args",
                    wraps=modelbridge._get_transformed_gen_args,
                ))
            mock_get_transformed_model_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_model_gen_args",
                    wraps=modelbridge._get_transformed_model_gen_args,
                ))
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "untransform_objective_thresholds",
                    wraps=modelbridge.untransform_objective_thresholds,
                ))
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=oc,
                fixed_features=fixed_features,
            )
            expected_obj_weights = torch.tensor([-1.0, 1.0])
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            expected_obj_weights))
            # check that transforms have been applied (at least UnitX)
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            oc = ckwargs["outcome_constraints"]
            self.assertTrue(torch.equal(oc[0], torch.tensor([[-1.0, 0.0]])))
            self.assertTrue(torch.equal(oc[1], torch.tensor([[45.0]])))
            lc = ckwargs["linear_constraints"]
            self.assertTrue(torch.equal(lc[0], torch.tensor([[15.0, 0.0]])))
            self.assertTrue(torch.equal(lc[1], torch.tensor([[15.0]])))
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
            mock_get_transformed_gen_args.assert_called_once()
            mock_get_transformed_model_gen_args.assert_called_once_with(
                search_space=expected_base_gen_args.search_space,
                fixed_features=expected_base_gen_args.fixed_features,
                pending_observations=expected_base_gen_args.pending_observations,
                optimization_config=expected_base_gen_args.optimization_config,
            )
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]

            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            expected_obj_weights))
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.LEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        Y = np.stack([df.branin_a.values, df.branin_b.values]).T
        Y = torch.from_numpy(Y)
        Y[:, 0] *= -1
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([-obj_thresholds[0].bound, obj_thresholds[1].bound]) <
                nadir.numpy()))
        # test using MTGP
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=5)
        trial = exp.new_batch_trial(optimize_for_power=True)
        trial.add_generator_run(sobol_run)
        trial.mark_running(no_runner_required=True).mark_completed()
        data = exp.fetch_data()
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=ST_MTGP_trans,
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={}, trial_index=1)
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=exp.optimization_config,
            fixed_features=fixed_features,
        )
        with self.assertRaises(ValueError):
            # Check that a ValueError is raised when MTGP is being used
            # and trial_index is not specified as a fixed features.
            # Note: this error is raised by StratifiedStandardizeY
            modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
            )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.multi_objective_torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "untransform_objective_thresholds",
                    wraps=modelbridge.untransform_objective_thresholds,
                ))
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
                fixed_features=fixed_features,
            )
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.GEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        trial_mask = df.trial_index == 1
        Y = np.stack(
            [df.branin_a.values[trial_mask], df.branin_b.values[trial_mask]]).T
        Y = torch.from_numpy(Y)
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([obj_thresholds[0].bound, obj_thresholds[1].bound]) <
                nadir.numpy()))
Code Example #22
File: test_managed_loop.py Project: zorrock/Ax
def get_experiment_data_sobol(experiment, data):
    return get_sobol(experiment.search_space)
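
Code Example #22 shows the smallest possible factory call: get_sobol needs only a search space, and (as the tests above assert) returns a RandomModelBridge. A short sketch of driving trials with it, mirroring the loop in Code Example #1 and assuming the same import paths as the earlier sketches:

# Sketch: generate and run a handful of Sobol trials.
from ax.modelbridge.factory import get_sobol
from ax.modelbridge.random import RandomModelBridge  # assumed import path
from ax.utils.testing.core_stubs import get_branin_experiment

exp = get_branin_experiment()
sobol = get_sobol(search_space=exp.search_space)
assert isinstance(sobol, RandomModelBridge)

for _ in range(5):
    sobol_run = sobol.gen(n=1)  # a GeneratorRun holding one arm
    exp.new_batch_trial().add_generator_run(sobol_run).run().mark_completed()

print(exp.fetch_data().df.head())  # one Branin observation per trial
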
Code Example #23
    def test_MTGP_NEHVI(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        metrics = single_obj_exp.optimization_config.objective.metrics
        metrics[0].lower_is_better = True
        objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0], bound=0.0, relative=False)
        ]
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                objective_thresholds=objective_thresholds,
            )

        multi_obj_exp = get_branin_experiment_with_multi_objective(with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics[0], bound=0.0, relative=False, op=ComparisonOp.GEQ
            ),
            ObjectiveThreshold(
                metric=metrics[1], bound=0.0, relative=False, op=ComparisonOp.GEQ
            ),
        ]
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
            )

        multi_obj_exp.trials[0].run()
        sobol_generator = get_sobol(search_space=multi_obj_exp.search_space)
        sobol_run = sobol_generator.gen(n=3)
        multi_obj_exp.new_batch_trial(optimize_for_power=False).add_generator_run(
            sobol_run
        )
        multi_obj_exp.trials[1].run()
        mt_ehvi = get_MTGP_NEHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            objective_thresholds=multi_objective_thresholds,
            trial_index=1,
        )
        self.assertIsInstance(mt_ehvi, TorchModelBridge)
        self.assertIsInstance(mt_ehvi.model.model.models[0], MultiTaskGP)
        task_covar_factor = mt_ehvi.model.model.models[0].task_covar_module.covar_factor
        self.assertEqual(task_covar_factor.shape, torch.Size([2, 2]))
        mt_ehvi_run = mt_ehvi.gen(
            n=1, fixed_features=ObservationFeatures(parameters={}, trial_index=1)
        )
        self.assertEqual(len(mt_ehvi_run.arms), 1)

        # Bad index given
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
                trial_index=999,
            )

        # Multi-type + multi-objective experiment
        multi_type_multi_obj_exp = get_multi_type_experiment_with_multi_objective(
            add_trials=True
        )
        data = multi_type_multi_obj_exp.fetch_data()
        mt_ehvi = get_MTGP_NEHVI(
            experiment=multi_type_multi_obj_exp,
            data=data,
            objective_thresholds=multi_objective_thresholds,
        )
Code Example #24
    def test_infer_objective_thresholds(self, _, cuda=False):
        # lightweight test
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            with_status_quo=True,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys())
        )
        data = exp.fetch_data()
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=Cont_X_trans + Y_trans,
            torch_device=torch.device("cuda" if cuda else "cpu"),
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={"x1": 0.0})
        search_space = exp.search_space.clone()
        param_constraints = [
            ParameterConstraint(constraint_dict={"x1": 1.0}, bound=10.0)
        ]
        search_space.add_parameter_constraints(param_constraints)
        oc = exp.optimization_config.clone()
        oc.objective._objectives[0].minimize = True
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=oc,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                )
            )
            mock_get_transformed_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_gen_args",
                    wraps=modelbridge._get_transformed_gen_args,
                )
            )
            mock_get_transformed_model_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_model_gen_args",
                    wraps=modelbridge._get_transformed_model_gen_args,
                )
            )
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "_untransform_objective_thresholds",
                    wraps=modelbridge._untransform_objective_thresholds,
                )
            )
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=oc,
                fixed_features=fixed_features,
            )
            expected_obj_weights = torch.tensor([-1.0, 1.0])
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], expected_obj_weights)
            )
            # check that transforms have been applied (at least UnitX)
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            lc = ckwargs["linear_constraints"]
            self.assertTrue(torch.equal(lc[0], torch.tensor([[15.0, 0.0]])))
            self.assertTrue(torch.equal(lc[1], torch.tensor([[15.0]])))
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
            mock_get_transformed_gen_args.assert_called_once()
            mock_get_transformed_model_gen_args.assert_called_once_with(
                search_space=expected_base_gen_args.search_space,
                fixed_features=expected_base_gen_args.fixed_features,
                pending_observations=expected_base_gen_args.pending_observations,
                optimization_config=expected_base_gen_args.optimization_config,
            )
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]

            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], expected_obj_weights)
            )
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.LEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        Y = np.stack([df.branin_a.values, df.branin_b.values]).T
        Y = torch.from_numpy(Y)
        Y[:, 0] *= -1
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([-obj_thresholds[0].bound, obj_thresholds[1].bound])
                < nadir.numpy()
            )
        )
        # test using MTGP
        sobol_generator = get_sobol(
            search_space=exp.search_space,
            seed=TEST_SOBOL_SEED,
            # set initial position equal to the number of sobol arms generated
            # so far. This means that new sobol arms will complement the previous
            # arms in a space-filling fashion
            init_position=len(exp.arms_by_name) - 1,
        )
        sobol_run = sobol_generator.gen(n=2)
        trial = exp.new_batch_trial(optimize_for_power=True)
        trial.add_generator_run(sobol_run)
        trial.mark_running(no_runner_required=True).mark_completed()
        data = exp.fetch_data()
        torch.manual_seed(0)  # make model fitting deterministic
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=ST_MTGP_trans,
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={}, trial_index=1)
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=exp.optimization_config,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                )
            )
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "_untransform_objective_thresholds",
                    wraps=modelbridge._untransform_objective_thresholds,
                )
            )
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
                fixed_features=fixed_features,
            )
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.GEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        trial_mask = df.trial_index == 1
        Y = np.stack([df.branin_a.values[trial_mask], df.branin_b.values[trial_mask]]).T
        Y = torch.from_numpy(Y)
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([obj_thresholds[0].bound, obj_thresholds[1].bound])
                < nadir.numpy()
            )
        )