Example #1
    def testNumArmsNoDeduplication(self):
        exp = Experiment(name="test_experiment",
                         search_space=get_search_space())
        arm = get_arm()
        exp.new_batch_trial().add_arm(arm)
        trial = exp.new_batch_trial().add_arm(arm)
        self.assertEqual(exp.sum_trial_sizes, 2)
        self.assertEqual(len(exp.arms_by_name), 1)
        trial.mark_arm_abandoned(trial.arms[0].name)
        self.assertEqual(exp.num_abandoned_arms, 1)
Example #2
    def testEmptyMetrics(self):
        empty_experiment = Experiment(
            name="test_experiment",
            search_space=get_search_space(),
            default_data_type=DataType.MAP_DATA,
        )
        self.assertEqual(empty_experiment.num_trials, 0)
        empty_experiment.add_tracking_metric(Metric(name="ax_test_metric"))
        self.assertTrue(empty_experiment.fetch_data().df.empty)
        empty_experiment.attach_data(get_map_data())
Example #3
def get_experiment() -> Experiment:
    return Experiment(
        name="test",
        search_space=get_search_space(),
        optimization_config=get_optimization_config(),
        status_quo=get_status_quo(),
        description="test description",
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )
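The helper above hides the search space and optimization config behind test stubs. For reference, here is a minimal sketch of the same constructor call with everything spelled out; the parameter names, bounds, and metric names are illustrative, and the top-level `ax` imports assume a recent Ax release:

from ax import (
    Experiment,
    Metric,
    Objective,
    OptimizationConfig,
    ParameterType,
    RangeParameter,
    SearchSpace,
)

# Illustrative stand-ins for get_search_space() / get_optimization_config().
search_space = SearchSpace(
    parameters=[
        RangeParameter(name="x1", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0),
        RangeParameter(name="x2", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0),
    ]
)
experiment = Experiment(
    name="test",
    search_space=search_space,
    optimization_config=OptimizationConfig(
        objective=Objective(metric=Metric(name="objective"), minimize=True)
    ),
    description="test description",
    tracking_metrics=[Metric(name="tracking")],
    is_test=True,
)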
Example #4
    def testTrackingMetricsMerge(self):
        # Tracking and optimization metrics should get merged
        # m1 is on optimization_config while m3 is not
        exp = Experiment(
            name="test2",
            search_space=get_search_space(),
            optimization_config=get_optimization_config(),
            tracking_metrics=[Metric(name="m1"), Metric(name="m3")],
        )
        self.assertEqual(len(exp.optimization_config.metrics) + 1, len(exp.metrics))
Example #5
    def test_best_point(
        self,
        _mock_gen,
        _mock_best_point,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(search_space=get_search_space_for_range_value(),
                         name="test")
        modelbridge = ArrayModelBridge(
            search_space=get_search_space_for_range_value(),
            model=NumpyModel(),
            transforms=[t1, t2],
            experiment=exp,
            data=Data(),
        )
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["Cast", "t1", "t2"])
        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a"]
        run = modelbridge.gen(
            n=1,
            optimization_config=OptimizationConfig(
                objective=Objective(metric=Metric("a"), minimize=False),
                outcome_constraints=[],
            ),
        )
        arm, predictions = run.best_arm_predictions
        self.assertEqual(arm.parameters, {})
        self.assertEqual(predictions[0], {"m": 1.0})
        self.assertEqual(predictions[1], {"m": {"m": 2.0}})
        # check that an optimization config is required
        with self.assertRaises(ValueError):
            run = modelbridge.gen(n=1, optimization_config=None)

        # test optimization config validation - raise error when
        # ScalarizedOutcomeConstraint contains a metric that is not in the outcomes
        with self.assertRaises(ValueError):
            run = modelbridge.gen(
                n=1,
                optimization_config=OptimizationConfig(
                    objective=Objective(metric=Metric("a"), minimize=False),
                    outcome_constraints=[
                        ScalarizedOutcomeConstraint(
                            metrics=[Metric("wrong_metric_name")],
                            weights=[1.0],
                            op=ComparisonOp.LEQ,
                            bound=0,
                        )
                    ],
                ),
            )
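The last `gen` call above fails because "wrong_metric_name" is not among `modelbridge.outcomes` (only "a" is registered). A minimal sketch of an optimization config that would pass that check, assuming the model's outcomes also included a metric "b"; import paths follow the current Ax layout:

from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
from ax.core.types import ComparisonOp

# Objective on "a", scalarized constraint on "b": both metrics would need to
# appear in the model's outcomes for gen() to accept this config.
valid_config = OptimizationConfig(
    objective=Objective(metric=Metric("a"), minimize=False),
    outcome_constraints=[
        ScalarizedOutcomeConstraint(
            metrics=[Metric("b")],
            weights=[1.0],
            op=ComparisonOp.LEQ,
            bound=0.0,
        )
    ],
)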
Example #6
def get_experiment_with_map_data_type():
    return Experiment(
        name="test_map_data",
        search_space=get_search_space(),
        optimization_config=get_map_optimization_config(),
        status_quo=get_status_quo(),
        description="test description",
        tracking_metrics=[MapMetric(name="tracking")],
        is_test=True,
        default_data_type=DataType.MAP_DATA,
    )
Example #7
def experiment_from_json(object_json: Dict[str, Any]) -> Experiment:
    """Load Ax Experiment from JSON."""
    experiment_info = _get_experiment_info(object_json)

    experiment = Experiment(
        **{k: object_from_json(v)
           for k, v in object_json.items()})
    experiment._arms_by_name = {}

    _load_experiment_info(exp=experiment, exp_info=experiment_info)
    return experiment
Example #8
    def testExperimentWithoutName(self):
        exp = Experiment(
            search_space=get_branin_search_space(),
            tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
            runner=SyntheticRunner(),
        )
        self.assertEqual("Experiment(None)", str(exp))
        batch = exp.new_batch_trial()
        batch.add_arms_and_weights(arms=get_branin_arms(n=5, seed=0))
        batch.run()
        self.assertEqual(batch.run_metadata, {"name": "0"})
Example #9
    def testEq(self):
        self.assertEqual(self.experiment, self.experiment)

        experiment2 = Experiment(
            name="test2",
            search_space=get_search_space(),
            optimization_config=get_optimization_config(),
            status_quo=get_arm(),
            description="test description",
        )
        self.assertNotEqual(self.experiment, experiment2)
Example #10
    def testEmptyMetrics(self):
        empty_experiment = Experiment(name="test_experiment",
                                      search_space=get_search_space())
        self.assertEqual(empty_experiment.num_trials, 0)
        with self.assertRaises(ValueError):
            empty_experiment.fetch_data()
        batch = empty_experiment.new_batch_trial()
        self.assertEqual(empty_experiment.num_trials, 1)
        with self.assertRaises(ValueError):
            batch.fetch_data()
        empty_experiment.add_tracking_metric(Metric(name="some_metric"))
        empty_experiment.attach_data(get_data())
        self.assertFalse(empty_experiment.fetch_data().df.empty)
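`attach_data` consumes an Ax `Data` object wrapping a pandas DataFrame; `get_data()` in the test above returns one in the standard schema. A minimal sketch of building such an object by hand (the arm name and values are illustrative):

import pandas as pd
from ax import Data

# Standard Ax Data columns: arm_name, metric_name, mean, sem, trial_index.
df = pd.DataFrame(
    {
        "arm_name": ["0_0"],
        "metric_name": ["some_metric"],
        "mean": [1.0],
        "sem": [0.1],
        "trial_index": [0],
    }
)
data = Data(df=df)
# Could then be attached in place of get_data():
# empty_experiment.attach_data(data)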
Example #11
def _benchmark_replication_Dev_API(
    problem: BenchmarkProblem,
    method: GenerationStrategy,
    num_trials: int,
    experiment_name: str,
    batch_size: int = 1,
    raise_all_exceptions: bool = False,
    benchmark_trial: FunctionType = benchmark_trial,
    verbose_logging: bool = True,
    # Number of trials that need to fail for a replication to be considered failed.
    failed_trials_tolerated: int = 5,
    async_benchmark_options: Optional[AsyncBenchmarkOptions] = None,
) -> Tuple[Experiment, List[Exception]]:
    """Run a benchmark replication via the Developer API because the problem was
    set up with Ax classes (likely to allow for additional complexity like
    adding constraints or non-range parameters).
    """
    if async_benchmark_options is not None:
        raise NonRetryableBenchmarkingError(
            "`async_benchmark_options` not supported when using the Dev API."
        )

    exceptions = []
    experiment = Experiment(
        name=experiment_name,
        search_space=problem.search_space,
        optimization_config=problem.optimization_config,
        runner=SyntheticRunner(),
    )
    for trial_index in range(num_trials):
        try:
            gr = method.gen(experiment=experiment, n=batch_size)
            if batch_size == 1:
                trial = experiment.new_trial(generator_run=gr)
            else:
                assert batch_size > 1
                trial = experiment.new_batch_trial(generator_run=gr)
            trial.run()
            # TODO[T94059549]: Rm 3 lines below when attaching data in fetch is fixed.
            data = benchmark_trial(experiment=experiment, trial_index=trial_index)
            if not data.df.empty:
                experiment.attach_data(data=data)
        except Exception as err:  # TODO[T53975770]: test
            if raise_all_exceptions:
                raise
            exceptions.append(err)
        if len(exceptions) > failed_trials_tolerated:
            raise RuntimeError(  # TODO[T53975770]: test
                f"More than {failed_trials_tolerated} failed for {experiment_name}."
            )
    return experiment, exceptions
Example #12
def get_experiment_with_scalarized_objective() -> Experiment:
    objective = get_scalarized_objective()
    outcome_constraints = [get_outcome_constraint()]
    optimization_config = OptimizationConfig(
        objective=objective, outcome_constraints=outcome_constraints)
    return Experiment(
        name="test_experiment_scalarized_objective",
        search_space=get_search_space(),
        optimization_config=optimization_config,
        status_quo=get_status_quo(),
        description="test experiment with scalarized objective",
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )
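`get_scalarized_objective()` is a test stub; a minimal sketch of what a scalarized objective looks like when built directly (metric names and weights are illustrative):

from ax.core.metric import Metric
from ax.core.objective import ScalarizedObjective

# Weighted combination of two metrics, maximized.
objective = ScalarizedObjective(
    metrics=[Metric(name="m1"), Metric(name="m2")],
    weights=[1.0, 2.0],
    minimize=False,
)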
Example #13
    def test_fetch_as_class(self):
        class MyMetric(Metric):
            @property
            def fetch_multi_group_by_metric(self) -> Type[Metric]:
                return Metric

        m = MyMetric(name="test_metric")
        exp = Experiment(
            name="test",
            search_space=get_branin_search_space(),
            tracking_metrics=[m],
            runner=SyntheticRunner(),
        )
        self.assertEqual(exp._metrics_by_class(), {Metric: [m]})
Example #14
def get_experiment_with_multi_objective() -> Experiment:
    optimization_config = get_multi_objective_optimization_config()

    exp = Experiment(
        name="test_experiment_multi_objective",
        search_space=get_branin_search_space(),
        optimization_config=optimization_config,
        description="test experiment with multi objective",
        runner=SyntheticRunner(),
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )

    return exp
Example #15
    def test_importances(
        self,
        _mock_feature_importances,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(get_search_space_for_range_value(), "test")
        modelbridge = ArrayModelBridge(get_search_space_for_range_value(),
                                       NumpyModel(), [t1, t2], exp, 0)
        modelbridge.outcomes = ["a", "b"]
        self.assertEqual(modelbridge.feature_importances("a"), {"x": [1.0]})
        self.assertEqual(modelbridge.feature_importances("b"), {"x": [2.0]})
Example #16
    def _setupBraninExperiment(self, n: int) -> Experiment:
        exp = Experiment(
            name="test3",
            search_space=get_branin_search_space(),
            tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
            runner=SyntheticRunner(),
        )
        batch = exp.new_batch_trial()
        batch.add_arms_and_weights(arms=get_branin_arms(n=n, seed=0))
        batch.run()

        batch_2 = exp.new_batch_trial()
        batch_2.add_arms_and_weights(arms=get_branin_arms(n=3 * n, seed=1))
        batch_2.run()
        return exp
Example #17
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_trial: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    with_choice_parameter: bool = False,
    with_str_choice_param: bool = False,
    search_space: Optional[SearchSpace] = None,
    minimize: bool = False,
    named: bool = True,
    with_completed_trial: bool = False,
) -> Experiment:
    search_space = search_space or get_branin_search_space(
        with_fidelity_parameter=with_fidelity_parameter,
        with_choice_parameter=with_choice_parameter,
        with_str_choice_param=with_str_choice_param,
    )
    exp = Experiment(
        name="branin_test_experiment" if named else None,
        search_space=search_space,
        optimization_config=get_branin_optimization_config(
            minimize=minimize) if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial(
            optimize_for_power=with_status_quo).add_generator_run(sobol_run)

    if with_trial or with_completed_trial:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=1)
        trial = exp.new_trial(generator_run=sobol_run)

        if with_completed_trial:
            trial.mark_running(no_runner_required=True)
            exp.attach_data(
                get_branin_data(trials=[trial]))  # Add data for one trial
            trial.mark_completed()

    return exp
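Typical use of this fixture factory, assuming it is imported from ax.utils.testing.core_stubs (its usual location in recent Ax versions):

from ax.utils.testing.core_stubs import get_branin_experiment

# One Sobol-generated batch trial plus a status quo arm.
exp = get_branin_experiment(with_batch=True, with_status_quo=True)
print(exp.name, len(exp.trials), exp.status_quo)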
Example #18
    def testEmptyMetrics(self):
        empty_experiment = Experiment(name="test_experiment",
                                      search_space=get_search_space())
        self.assertEqual(empty_experiment.num_trials, 0)
        with self.assertRaises(ValueError):
            empty_experiment.fetch_data()
        batch = empty_experiment.new_batch_trial()
        batch.mark_running(no_runner_required=True)
        self.assertEqual(empty_experiment.num_trials, 1)
        with self.assertRaises(ValueError):
            batch.fetch_data()
        empty_experiment.add_tracking_metric(Metric(name="ax_test_metric"))
        self.assertTrue(empty_experiment.fetch_data().df.empty)
        empty_experiment.attach_data(get_data())
        batch.mark_completed()
        self.assertFalse(empty_experiment.fetch_data().df.empty)
Example #19
def get_experiment_with_multi_objective() -> Experiment:
    objective = get_multi_objective()
    outcome_constraints = [get_outcome_constraint()]
    optimization_config = OptimizationConfig(
        objective=objective, outcome_constraints=outcome_constraints)

    exp = Experiment(
        name="test_experiment_multi_objective",
        search_space=get_branin_search_space(),
        optimization_config=optimization_config,
        description="test experiment with multi objective",
        runner=SyntheticRunner(),
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )

    return exp
Example #20
def experiment_from_json(object_json: Dict[str, Any]) -> Experiment:
    """Load Ax Experiment from JSON."""
    time_created_json = object_json.pop("time_created")
    trials_json = object_json.pop("trials")
    experiment_type_json = object_json.pop("experiment_type")
    data_by_trial_json = object_json.pop("data_by_trial")
    experiment = Experiment(**{k: object_from_json(v) for k, v in object_json.items()})
    experiment._time_created = object_from_json(time_created_json)
    experiment._trials = trials_from_json(experiment, trials_json)
    for trial in experiment._trials.values():
        for arm in trial.arms:
            experiment._arms_by_signature[arm.signature] = arm
    if experiment.status_quo is not None:
        sq_sig = experiment.status_quo.signature
        experiment._arms_by_signature[sq_sig] = experiment.status_quo
    experiment._experiment_type = object_from_json(experiment_type_json)
    experiment._data_by_trial = data_from_json(data_by_trial_json)
    return experiment
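A sketch of how this decoder is typically exercised end to end, assuming the JSON-store helpers live at ax.storage.json_store.save / ax.storage.json_store.load (true in recent Ax versions; the file path is illustrative):

from ax.storage.json_store.load import load_experiment
from ax.storage.json_store.save import save_experiment

save_experiment(experiment, "experiment.json")   # encodes the experiment to JSON
loaded = load_experiment("experiment.json")      # decodes via experiment_from_json
assert loaded == experiment                      # round trip should compare equal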
Example #21
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
) -> Experiment:
    """Instantiation wrapper that allows for creation of SimpleExperiment
    without importing or instantiating any Ax classes."""

    exp_parameters: List[Parameter] = [
        parameter_from_json(p) for p in parameters
    ]
    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)
    parameter_map = {p.name: p for p in exp_parameters}
    ocs = [outcome_constraint_from_str(c) for c in (outcome_constraints or [])]
    if status_quo_arm is None and any(oc.relative for oc in ocs):
        raise ValueError(
            "Must set status_quo to have relative outcome constraints.")
    return Experiment(
        name=name,
        search_space=SearchSpace(
            parameters=exp_parameters,
            parameter_constraints=None if parameter_constraints is None else [
                constraint_from_str(c, parameter_map)
                for c in parameter_constraints
            ],
        ),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Metric(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=ocs,
        ),
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
    )
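A sketch of calling this wrapper with the plain-dict parameter representation, assuming it is importable from ax.service.utils.instantiation (the location may differ across Ax versions); parameter names and the constraint are illustrative:

from ax.service.utils.instantiation import make_experiment

experiment = make_experiment(
    parameters=[
        {"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 1.0]},
    ],
    name="quickstart",
    objective_name="accuracy",
    minimize=False,
    parameter_constraints=["x1 <= x2"],  # order constraint parsed by constraint_from_str
)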
Example #22
def _benchmark_replication_Dev_API(
    problem: BenchmarkProblem,
    method: GenerationStrategy,
    num_trials: int,
    experiment_name: str,
    batch_size: int = 1,
    raise_all_exceptions: bool = False,
    benchmark_trial: FunctionType = benchmark_trial,
    verbose_logging: bool = True,
    # Number of trials that need to fail for a replication to be considered failed.
    failed_trials_tolerated: int = 5,
) -> Tuple[Experiment, List[Exception]]:
    """Run a benchmark replication via the Developer API because the problem was
    set up with Ax classes (likely to allow for additional complexity like
    adding constraints or non-range parameters).
    """
    exceptions = []
    experiment = Experiment(
        name=experiment_name,
        search_space=problem.search_space,
        optimization_config=problem.optimization_config,
        runner=SyntheticRunner(),
    )
    for trial_index in range(num_trials):
        try:
            gr = method.gen(experiment=experiment, n=batch_size)
            if batch_size == 1:
                trial = experiment.new_trial(generator_run=gr)
            else:
                assert batch_size > 1
                trial = experiment.new_batch_trial(generator_run=gr)
            trial.run()
            benchmark_trial(experiment=experiment, trial_index=trial_index)
            trial.mark_completed()
        except Exception as err:  # TODO[T53975770]: test
            if raise_all_exceptions:
                raise
            exceptions.append(err)
        if len(exceptions) > failed_trials_tolerated:
            raise RuntimeError(  # TODO[T53975770]: test
                f"More than {failed_trials_tolerated} failed for {experiment_name}."
            )
    return experiment, exceptions
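The `method` argument is an Ax GenerationStrategy; a minimal sketch of one that could be passed in, mirroring the Sobol-then-GPEI pattern shown later in this listing (registry names as in current Ax releases):

from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import Models

method = GenerationStrategy(
    steps=[
        GenerationStep(model=Models.SOBOL, num_trials=5),
        # num_trials=-1 on the last step means "generate indefinitely".
        GenerationStep(model=Models.GPEI, num_trials=-1),
    ]
)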
Example #23
    def _init_experiment_from_sqa(self,
                                  experiment_sqa: SQAExperiment) -> Experiment:
        """First step of conversion within experiment_from_sqa."""
        opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
            metrics_sqa=experiment_sqa.metrics)
        search_space = self.search_space_from_sqa(
            parameters_sqa=experiment_sqa.parameters,
            parameter_constraints_sqa=experiment_sqa.parameter_constraints,
        )
        if search_space is None:
            raise SQADecodeError(  # pragma: no cover
                "Experiment SearchSpace cannot be None.")
        status_quo = (Arm(
            parameters=experiment_sqa.status_quo_parameters,
            name=experiment_sqa.status_quo_name,
        ) if experiment_sqa.status_quo_parameters is not None else None)
        if len(experiment_sqa.runners) == 0:
            runner = None
        elif len(experiment_sqa.runners) == 1:
            runner = self.runner_from_sqa(experiment_sqa.runners[0])
        else:
            raise ValueError(  # pragma: no cover
                "Multiple runners on experiment "
                "only supported for MultiTypeExperiment.")

        # `experiment_sqa.properties` is `sqlalchemy.ext.mutable.MutableDict`
        # so need to convert it to regular dict.
        properties = dict(experiment_sqa.properties or {})
        default_data_type = experiment_sqa.default_data_type
        return Experiment(
            name=experiment_sqa.name,
            description=experiment_sqa.description,
            search_space=search_space,
            optimization_config=opt_config,
            tracking_metrics=tracking_metrics,
            runner=runner,
            status_quo=status_quo,
            is_test=experiment_sqa.is_test,
            properties=properties,
            default_data_type=default_data_type,
        )
Example #24
def get_branin_experiment_with_timestamp_map_metric(
    rate: Optional[float] = None,
    incremental: Optional[bool] = False,
):
    metric_cls = (
        BraninTimestampMapMetric
        if not incremental
        else BraninIncrementalTimestampMapMetric
    )
    return Experiment(
        name="branin_with_timestamp_map_metric",
        search_space=get_branin_search_space(),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=metric_cls(name="branin", param_names=["x1", "x2"], rate=rate),
                minimize=True,
            )
        ),
        tracking_metrics=[metric_cls(name="b", param_names=["x1", "x2"])],
        runner=SyntheticRunner(),
        default_data_type=DataType.MAP_DATA,
    )
Example #25
def get_branin_with_multi_task(with_multi_objective: bool = False):
    exp = Experiment(
        name="branin_test_experiment",
        search_space=get_branin_search_space(),
        optimization_config=get_branin_multi_objective_optimization_config(
            has_objective_thresholds=True, )
        if with_multi_objective else get_branin_optimization_config(),
        runner=SyntheticRunner(),
        is_test=True,
    )

    exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0}, name="status_quo")

    sobol_generator = get_sobol(search_space=exp.search_space,
                                seed=TEST_SOBOL_SEED)
    sobol_run = sobol_generator.gen(n=5)
    exp.new_batch_trial(optimize_for_power=True).add_generator_run(sobol_run)
    not_none(exp.trials.get(0)).run()
    exp.new_batch_trial(optimize_for_power=True).add_generator_run(sobol_run)
    not_none(exp.trials.get(1)).run()

    return exp
Example #26
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_trial: bool = False,
    with_status_quo: bool = False,
    with_fidelity_parameter: bool = False,
    with_choice_parameter: bool = False,
    search_space: Optional[SearchSpace] = None,
    minimize: bool = False,
) -> Experiment:
    search_space = search_space or get_branin_search_space(
        with_fidelity_parameter=with_fidelity_parameter,
        with_choice_parameter=with_choice_parameter,
    )
    exp = Experiment(
        name="branin_test_experiment",
        search_space=search_space,
        optimization_config=get_branin_optimization_config(
            minimize=minimize) if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial(
            optimize_for_power=with_status_quo).add_generator_run(sobol_run)

    if with_trial:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=1)
        exp.new_trial(generator_run=sobol_run)

    return exp
Example #27
File: decoder.py  Project: bitnot/Ax
def experiment_from_json(object_json: Dict[str, Any]) -> Experiment:
    """Load Ax Experiment from JSON."""
    time_created_json = object_json.pop("time_created")
    trials_json = object_json.pop("trials")
    experiment_type_json = object_json.pop("experiment_type")
    data_by_trial_json = object_json.pop("data_by_trial")
    experiment = Experiment(
        **{k: object_from_json(v)
           for k, v in object_json.items()})
    experiment._time_created = object_from_json(time_created_json)
    experiment._trials = trials_from_json(experiment, trials_json)
    experiment._arms_by_name = {}
    for trial in experiment._trials.values():
        for arm in trial.arms:
            experiment._register_arm(arm)
        if trial.ttl_seconds is not None:
            experiment._trials_have_ttl = True
    if experiment.status_quo is not None:
        sq = not_none(experiment.status_quo)
        experiment._register_arm(sq)
    experiment._experiment_type = object_from_json(experiment_type_json)
    experiment._data_by_trial = data_from_json(data_by_trial_json)
    return experiment
Example #28
def get_branin_experiment_with_timestamp_map_metric(
    with_status_quo: bool = False,
    rate: Optional[float] = None,
) -> Experiment:
    exp = Experiment(
        name="branin_with_timestamp_map_metric",
        search_space=get_branin_search_space(),
        optimization_config=OptimizationConfig(objective=Objective(
            metric=BraninTimestampMapMetric(
                name="branin_map", param_names=["x1", "x2"], rate=rate),
            minimize=True,
        )),
        tracking_metrics=[
            BraninMetric(name="branin", param_names=["x1", "x2"])
        ],
        runner=SyntheticRunner(),
        default_data_type=DataType.MAP_DATA,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})

    return exp
Example #29
def get_branin_experiment(
    has_optimization_config: bool = True,
    with_batch: bool = False,
    with_status_quo: bool = False,
) -> Experiment:
    exp = Experiment(
        name="branin_test_experiment",
        search_space=get_branin_search_space(),
        optimization_config=get_branin_optimization_config()
        if has_optimization_config else None,
        runner=SyntheticRunner(),
        is_test=True,
    )

    if with_status_quo:
        exp.status_quo = Arm(parameters={"x1": 0, "x2": 0})

    if with_batch:
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=15)
        exp.new_batch_trial().add_generator_run(sobol_run)

    return exp
Example #30
    def test_validation(self):
        # num_trials can be positive or -1.
        with self.assertRaises(UserInputError):
            GenerationStrategy(steps=[
                GenerationStep(model=Models.SOBOL, num_trials=5),
                GenerationStep(model=Models.GPEI, num_trials=-10),
            ])

        # only last num_trials can be -1.
        with self.assertRaises(UserInputError):
            GenerationStrategy(steps=[
                GenerationStep(model=Models.SOBOL, num_trials=-1),
                GenerationStep(model=Models.GPEI, num_trials=10),
            ])

        exp = Experiment(
            name="test",
            search_space=SearchSpace(parameters=[get_choice_parameter()]))
        factorial_thompson_generation_strategy = GenerationStrategy(steps=[
            GenerationStep(model=Models.FACTORIAL, num_trials=1),
            GenerationStep(model=Models.THOMPSON, num_trials=2),
        ])
        self.assertTrue(
            factorial_thompson_generation_strategy._uses_registered_models)
        self.assertFalse(
            factorial_thompson_generation_strategy.uses_non_registered_models)
        with self.assertRaises(ValueError):
            factorial_thompson_generation_strategy.gen(exp)
        self.assertEqual(
            GenerationStep(model=sum, num_trials=1).model_name, "sum")
        with self.assertRaisesRegex(UserInputError,
                                    "Maximum parallelism should be"):
            GenerationStrategy(steps=[
                GenerationStep(
                    model=Models.SOBOL, num_trials=5, max_parallelism=-1),
                GenerationStep(model=Models.GPEI, num_trials=-1),
            ])