Example #1
    def test_add_tracking_metrics(self):
        experiment = make_experiment(
            parameters=[{
                "name": "x",
                "type": "range",
                "bounds": [0, 1]
            }],
            tracking_metric_names=None,
        )
        self.assertDictEqual(experiment._tracking_metrics, {})

        metrics_names = ["metric_1", "metric_2"]
        experiment = make_experiment(
            parameters=[{
                "name": "x",
                "type": "range",
                "bounds": [0, 1]
            }],
            tracking_metric_names=metrics_names,
        )
        self.assertDictEqual(
            experiment._tracking_metrics,
            {
                metric_name: Metric(name=metric_name)
                for metric_name in metrics_names
            },
        )
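Below is a minimal standalone sketch of the same behavior outside the test harness; the import paths (`ax.service.utils.instantiation.make_experiment`, `ax.core.metric.Metric`) and the objective metric name "y" are assumptions about the Ax version these snippets target.

from ax.core.metric import Metric
from ax.service.utils.instantiation import make_experiment  # assumed import path

# Build an experiment with an objective plus two tracking metrics.
experiment = make_experiment(
    parameters=[{"name": "x", "type": "range", "bounds": [0, 1]}],
    objective_name="y",  # hypothetical objective metric name
    tracking_metric_names=["metric_1", "metric_2"],
)

# As the test above asserts, `_tracking_metrics` maps each requested name to a
# bare `Metric` instance.
assert experiment._tracking_metrics == {
    "metric_1": Metric(name="metric_1"),
    "metric_2": Metric(name="metric_2"),
}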
Example #2
    def test_objective_validation(self):
        with self.assertRaisesRegex(UnsupportedError, "Ambiguous objective definition"):
            make_experiment(
                parameters=[{"name": "x", "type": "range", "bounds": [0, 1]}],
                objective_name="branin",
                objectives={"branin": "minimize", "currin": "maximize"},
            )
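The unambiguous alternative the test above is guarding is to pass either `objective_name` or `objectives`, never both; a short sketch of the multi-objective form, reusing the same argument values as the test:

# Multi-objective definition via `objectives` alone, with per-metric
# "minimize"/"maximize" directions.
experiment = make_experiment(
    parameters=[{"name": "x", "type": "range", "bounds": [0, 1]}],
    objectives={"branin": "minimize", "currin": "maximize"},
)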
Example #3
def with_evaluation_function(
    parameters: List[TParameterRepresentation],
    evaluation_function: TEvaluationFunction,
    experiment_name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    total_trials: int = 20,
    arms_per_trial: int = 1,
    wait_time: int = 0,
    random_seed: Optional[int] = None,
    generation_strategy: Optional[GenerationStrategy] = None,
) -> "OptimizationLoop":
    """Constructs a synchronous `OptimizationLoop` using an evaluation
    function."""
    experiment = make_experiment(
        name=experiment_name,
        parameters=parameters,
        objective_name=objective_name,
        minimize=minimize,
        parameter_constraints=parameter_constraints,
        outcome_constraints=outcome_constraints,
    )
    return OptimizationLoop(
        experiment=experiment,
        total_trials=total_trials,
        arms_per_trial=arms_per_trial,
        random_seed=random_seed,
        wait_time=wait_time,
        generation_strategy=generation_strategy,
        evaluation_function=evaluation_function,
    )
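A hedged usage sketch, not part of the source above, of how `with_evaluation_function` might be driven: the Booth toy objective, the parameter names, and the trial count are invented for illustration, and the evaluation function follows the convention of returning `{metric_name: (mean, sem)}`.

def booth(parameterization, weight=None):
    # Toy objective; returns a dict of metric name to (mean, sem).
    x1, x2 = parameterization["x1"], parameterization["x2"]
    return {"booth": ((x1 + 2 * x2 - 7) ** 2 + (2 * x1 + x2 - 5) ** 2, 0.0)}

loop = with_evaluation_function(
    parameters=[
        {"name": "x1", "type": "range", "bounds": [-10.0, 10.0]},
        {"name": "x2", "type": "range", "bounds": [-10.0, 10.0]},
    ],
    evaluation_function=booth,
    experiment_name="booth_example",
    objective_name="booth",
    minimize=True,
    total_trials=10,
)
# A typical follow-up would be running the loop to completion (e.g. via
# `loop.full_run()`) and then querying its best point.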
Example #4
    def create_experiment(
        self,
        parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
        name: Optional[str] = None,
        objective_name: Optional[str] = None,
        minimize: bool = False,
        parameter_constraints: Optional[List[str]] = None,
        outcome_constraints: Optional[List[str]] = None,
        status_quo: Optional[TParameterization] = None,
    ) -> None:
        """Create a new experiment and save it if DBSettings available.

        Args:
            parameters: List of dictionaries representing parameters in the
                experiment search space. Required elements in the dictionaries
                are: "name" (name of this parameter, string), "type" (type of the
                parameter: "range", "fixed", or "choice", string), and "bounds"
                for range parameters (list of two values, lower bound first),
                "values" for choice parameters (list of values), and "value" for
                fixed parameters (single value).
            objective_name: Name of the metric used as objective in this experiment.
                This metric must be present in the `raw_data` argument to `complete_trial`.
            name: Name of the experiment to be created.
            minimize: Whether this experiment represents a minimization problem.
            parameter_constraints: List of string representations of parameter
                constraints, such as "x3 >= x4" or "x3 + x4 + x5 >= 2". For sum
                constraints, any number of parameters is accepted, and the
                acceptable operators are "<=" and ">=".
            outcome_constraints: List of string representations of outcome
                constraints of the form "metric_name >= bound", such as "m1 <= 3".
            status_quo: Parameterization of the current state of the system.
                If set, this will be added to each trial to be evaluated alongside
                test configurations.
        """
        if self.db_settings and not name:
            raise ValueError(  # pragma: no cover
                "Must give the experiment a name if `db_settings` is not None."
            )

        self._experiment = make_experiment(
            name=name,
            parameters=parameters,
            objective_name=objective_name,
            minimize=minimize,
            parameter_constraints=parameter_constraints,
            outcome_constraints=outcome_constraints,
            status_quo=status_quo,
        )
        if self._generation_strategy is None:
            self._generation_strategy = choose_generation_strategy(
                search_space=self._experiment.search_space,
                enforce_sequential_optimization=self._enforce_sequential_optimization,
                random_seed=self._random_seed,
            )
        self._save_experiment_and_generation_strategy_to_db_if_possible()
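A hedged illustration of the argument formats the docstring above describes; `client` stands in for whatever object defines this `create_experiment` method, and all parameter, metric, and constraint names are invented for the example.

client.create_experiment(
    name="example_experiment",
    parameters=[
        # "range" parameters take "bounds" (lower bound first).
        {"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 1.0]},
        # "choice" parameters take "values"; "fixed" parameters take "value".
        {"name": "x3", "type": "choice", "values": ["a", "b", "c"]},
        {"name": "x4", "type": "fixed", "value": 2.0},
    ],
    objective_name="m1",
    minimize=False,
    # Constraint strings as documented: inequalities over parameters, and
    # bounds on outcome metrics.
    parameter_constraints=["x1 + x2 <= 1.5"],
    outcome_constraints=["m2 <= 3.0"],
    # Status quo arm evaluated alongside the test configurations.
    status_quo={"x1": 0.5, "x2": 0.5, "x3": "a", "x4": 2.0},
)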
Example #5
    def create_experiment(
        self,
        parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
        name: Optional[str] = None,
        objective_name: Optional[str] = None,
        minimize: bool = False,
        parameter_constraints: Optional[List[str]] = None,
        outcome_constraints: Optional[List[str]] = None,
    ) -> None:
        """Create a new experiment and save it if DBSettings available.

        Args:
            parameters: List of dictionaries representing parameters in the
                experiment search space. Required elements in the dictionaries
                are: "name" (name of this parameter, string), "type" (type of the
                parameter: "range", "fixed", or "choice", string), and "bounds"
                for range parameters (list of two values, lower bound first),
                "values" for choice parameters (list of values), and "value" for
                fixed parameters (single value).
            objective_name: Name of the metric used as objective in this experiment.
                This metric must be present in the `raw_data` argument to `log_data`.
            name: Name of the experiment to be created.
            minimize: Whether this experiment represents a minimization problem.
            parameter_constraints: List of string representations of parameter
                constraints, such as "x3 >= x4" or "x3 + x4 >= 2".
            outcome_constraints: List of string representations of outcome
                constraints of the form "metric_name >= bound", such as "m1 <= 3".
        """
        if self.db_settings and not name:
            raise ValueError(  # pragma: no cover
                "Must give the experiment a name if `db_settings` is not None."
            )

        self._experiment = make_experiment(
            name=name,
            parameters=parameters,
            objective_name=objective_name,
            minimize=minimize,
            parameter_constraints=parameter_constraints,
            outcome_constraints=outcome_constraints,
        )
        if self.generation_strategy is None:
            self.generation_strategy = choose_generation_strategy(
                search_space=self._experiment.search_space,
                enforce_sequential_optimization=self._enforce_sequential_optimization,
            )
        self._save_experiment_if_possible()
Example #6
def setup_ax_experiment_optimizer(data, ax_search_domain, fixed_gbm_params,
                                  control_arm_params):

    ax_experiment = make_experiment(
        name="GBM param optimisation",
        parameters=ax_search_domain,
        minimize=True,
        status_quo=control_arm_params,
    )

    ax_trial_evaluation_func = partial(score_gbm_configuration, data,
                                       fixed_gbm_params)

    gbm_mae = construct_ax_metric(ax_trial_evaluation_func, 'gbm_mae')

    ax_experiment.optimization_config = OptimizationConfig(
        objective=Objective(metric=gbm_mae, minimize=True),
    )

    ax_experiment.runner = SimpleRunner()

    return ax_experiment
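A hedged sketch of how the helper above might be called; `data`, `score_gbm_configuration`, and `construct_ax_metric` are assumed to come from the surrounding project, and the GBM parameter names and values below are illustrative only.

search_domain = [
    {"name": "learning_rate", "type": "range", "bounds": [0.01, 0.3]},
    {"name": "max_depth", "type": "range", "bounds": [2, 10]},
]
ax_experiment = setup_ax_experiment_optimizer(
    data=data,  # training data assumed to be loaded elsewhere
    ax_search_domain=search_domain,
    fixed_gbm_params={"n_estimators": 200},
    control_arm_params={"learning_rate": 0.1, "max_depth": 6},
)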
Example #7
    def create_experiment(
        self,
        parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
        name: Optional[str] = None,
        objective_name: Optional[str] = None,
        minimize: bool = False,
        parameter_constraints: Optional[List[str]] = None,
        outcome_constraints: Optional[List[str]] = None,
        status_quo: Optional[TParameterization] = None,
        overwrite_existing_experiment: bool = False,
        experiment_type: Optional[str] = None,
        choose_generation_strategy_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Create a new experiment and save it if DBSettings available.

        Args:
            parameters: List of dictionaries representing parameters in the
                experiment search space. Required elements in the dictionaries
                are: "name" (name of this parameter, string), "type" (type of the
                parameter: "range", "fixed", or "choice", string), and "bounds"
                for range parameters (list of two values, lower bound first),
                "values" for choice parameters (list of values), and "value" for
                fixed parameters (single value).
            objective_name: Name of the metric used as objective in this experiment.
                This metric must be present in the `raw_data` argument to `complete_trial`.
            name: Name of the experiment to be created.
            minimize: Whether this experiment represents a minimization problem.
            parameter_constraints: List of string representations of parameter
                constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
                the latter type of constraint, any number of parameters is accepted,
                and the acceptable operators are "<=" and ">=".
            outcome_constraints: List of string representations of outcome
                constraints of the form "metric_name >= bound", such as "m1 <= 3".
            status_quo: Parameterization of the current state of the system.
                If set, this will be added to each trial to be evaluated alongside
                test configurations.
            overwrite_existing_experiment: If an experiment has already been set
                on this `AxClient` instance, whether to reset it to the new one.
                If overwriting the experiment, generation strategy will be
                re-selected for the new experiment and restarted.
                To protect experiments in production, one cannot overwrite existing
                experiments if the experiment is already stored in the database,
                regardless of the value of `overwrite_existing_experiment`.
            choose_generation_strategy_kwargs: Keyword arguments to pass to
                `choose_generation_strategy` function which determines what
                generation strategy should be used when none was specified on init.
        """
        if self.db_settings_set and not name:
            raise ValueError(  # pragma: no cover
                "Must give the experiment a name if `db_settings` is not None."
            )
        if self.db_settings_set:
            experiment_id, _ = self._get_experiment_and_generation_strategy_db_id(
                experiment_name=not_none(name)
            )
            if experiment_id:
                raise ValueError(
                    f"Experiment {name} already exists in the database. "
                    "To protect experiments that are running in production, "
                    "overwriting stored experiments is not allowed. To "
                    "start a new experiment and store it, change the "
                    "experiment's name."
                )
        if self._experiment is not None:
            if overwrite_existing_experiment:
                exp_name = self.experiment._name or "untitled"
                new_exp_name = name or "untitled"
                logger.info(
                    f"Overwriting existing experiment ({exp_name}) on this client "
                    f"with new experiment ({new_exp_name}) and restarting the "
                    "generation strategy."
                )
                self._generation_strategy = None
            else:
                raise ValueError(
                    "Experiment already created for this client instance. "
                    "Set the `overwrite_existing_experiment` to `True` to overwrite "
                    "with new experiment."
                )

        self._experiment = make_experiment(
            name=name,
            parameters=parameters,
            objective_name=objective_name,
            minimize=minimize,
            parameter_constraints=parameter_constraints,
            outcome_constraints=outcome_constraints,
            status_quo=status_quo,
            experiment_type=experiment_type,
        )

        try:
            self._save_experiment_to_db_if_possible(
                experiment=self.experiment,
                suppress_all_errors=self._suppress_storage_errors,
            )
        except Exception:
            # Unset the experiment on this `AxClient` instance if saving it to
            # the DB raised an error, to avoid a case where the overall
            # `create_experiment` call fails with a storage error but
            # `self._experiment` is still set, forcing the user to pass the
            # `overwrite_existing_experiment` kwarg to re-attempt experiment
            # creation.
            self._experiment = None
            raise

        self._set_generation_strategy(
            choose_generation_strategy_kwargs=choose_generation_strategy_kwargs
        )
        self._save_generation_strategy_to_db_if_possible(
            generation_strategy=self.generation_strategy,
            suppress_all_errors=self._suppress_storage_errors,
        )
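A hedged illustration of the `overwrite_existing_experiment` behavior documented above, assuming `ax_client` is an `AxClient`-style instance without a stored experiment in the database (stored experiments cannot be overwritten regardless of the flag); names and parameters are illustrative.

ax_client.create_experiment(
    name="tuning_v1",
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objective_name="m1",
)
# A second call on the same client raises unless overwriting is opted into
# explicitly, which also re-selects and restarts the generation strategy.
ax_client.create_experiment(
    name="tuning_v2",
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objective_name="m1",
    overwrite_existing_experiment=True,
)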
Example #8
    def create_experiment(
        self,
        parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
        name: Optional[str] = None,
        objective_name: Optional[str] = None,
        minimize: bool = False,
        parameter_constraints: Optional[List[str]] = None,
        outcome_constraints: Optional[List[str]] = None,
        status_quo: Optional[TParameterization] = None,
        overwrite_existing_experiment: bool = False,
        experiment_type: Optional[str] = None,
        choose_generation_strategy_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Create a new experiment and save it if DBSettings available.

        Args:
            parameters: List of dictionaries representing parameters in the
                experiment search space. Required elements in the dictionaries
                are: "name" (name of this parameter, string), "type" (type of the
                parameter: "range", "fixed", or "choice", string), and "bounds"
                for range parameters (list of two values, lower bound first),
                "values" for choice parameters (list of values), and "value" for
                fixed parameters (single value).
            objective_name: Name of the metric used as objective in this experiment.
                This metric must be present in the `raw_data` argument to `complete_trial`.
            name: Name of the experiment to be created.
            minimize: Whether this experiment represents a minimization problem.
            parameter_constraints: List of string representations of parameter
                constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
                the latter type of constraint, any number of parameters is accepted,
                and the acceptable operators are "<=" and ">=".
            outcome_constraints: List of string representations of outcome
                constraints of the form "metric_name >= bound", such as "m1 <= 3".
            status_quo: Parameterization of the current state of the system.
                If set, this will be added to each trial to be evaluated alongside
                test configurations.
            overwrite_existing_experiment: If an experiment has already been set
                on this `AxClient` instance, whether to reset it to the new one.
                If overwriting the experiment, generation strategy will be
                re-selected for the new experiment and restarted.
            choose_generation_strategy_kwargs: Keyword arguments to pass to
                `choose_generation_strategy` function which determines what
                generation strategy should be used when none was specified on init.
        """
        if self.db_settings and not name:
            raise ValueError(  # pragma: no cover
                "Must give the experiment a name if `db_settings` is not None."
            )
        if self.db_settings:
            existing = None
            try:
                existing, _ = load_experiment_and_generation_strategy(
                    experiment_name=not_none(name),
                    db_settings=self.db_settings)
            except ValueError:  # Experiment does not exist, nothing to do.
                pass
            if existing and overwrite_existing_experiment:
                logger.info(f"Overwriting existing experiment {name}.")
            elif existing:
                raise ValueError(
                    f"Experiment {name} exists; set the `overwrite_existing_"
                    "experiment` to `True` to overwrite with new experiment "
                    "or use `ax_client.load_experiment_from_database` to "
                    "continue an existing experiment.")
        if self._experiment is not None:
            if overwrite_existing_experiment:
                exp_name = self.experiment._name or "untitled"
                new_exp_name = name or "untitled"
                logger.info(
                    f"Overwriting existing experiment ({exp_name}) on this client "
                    f"with new experiment ({new_exp_name}) and restarting the "
                    "generation strategy.")
                self._generation_strategy = None
            else:
                raise ValueError(
                    f"Experiment already created for this client instance. "
                    "Set the `overwrite_existing_experiment` to `True` to overwrite "
                    "with new experiment.")

        self._experiment = make_experiment(
            name=name,
            parameters=parameters,
            objective_name=objective_name,
            minimize=minimize,
            parameter_constraints=parameter_constraints,
            outcome_constraints=outcome_constraints,
            status_quo=status_quo,
            experiment_type=experiment_type,
        )
        choose_generation_strategy_kwargs = choose_generation_strategy_kwargs or {}
        random_seed = choose_generation_strategy_kwargs.pop(
            "random_seed", self._random_seed)
        enforce_sequential_optimization = choose_generation_strategy_kwargs.pop(
            "enforce_sequential_optimization",
            self._enforce_sequential_optimization)
        if self._generation_strategy is None:
            self._generation_strategy = choose_generation_strategy(
                search_space=not_none(self._experiment).search_space,
                enforce_sequential_optimization=enforce_sequential_optimization,
                random_seed=random_seed,
                **choose_generation_strategy_kwargs,
            )
        self._save_experiment_and_generation_strategy_to_db_if_possible(
            overwrite_existing_experiment=True)
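A hedged example of the `choose_generation_strategy_kwargs` handling above: `random_seed` and `enforce_sequential_optimization` are popped and merged with the client-level defaults, and any remaining keys are forwarded to `choose_generation_strategy`. The forwarded key shown here (`num_initialization_trials`) is an assumption about that function's signature.

ax_client.create_experiment(
    name="seeded_experiment",
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objective_name="m1",
    choose_generation_strategy_kwargs={
        "random_seed": 0,                # popped and passed explicitly above
        "num_initialization_trials": 5,  # forwarded via **choose_generation_strategy_kwargs
    },
)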
Example #9
    def create_experiment(
        self,
        parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
        name: Optional[str] = None,
        objective_name: Optional[str] = None,
        minimize: bool = False,
        parameter_constraints: Optional[List[str]] = None,
        outcome_constraints: Optional[List[str]] = None,
        status_quo: Optional[TParameterization] = None,
        overwrite_existing_experiment: bool = False,
        experiment_type: Optional[str] = None,
    ) -> None:
        """Create a new experiment and save it if DBSettings available.

        Args:
            parameters: List of dictionaries representing parameters in the
                experiment search space. Required elements in the dictionaries
                are: "name" (name of this parameter, string), "type" (type of the
                parameter: "range", "fixed", or "choice", string), and "bounds"
                for range parameters (list of two values, lower bound first),
                "values" for choice parameters (list of values), and "value" for
                fixed parameters (single value).
            objective_name: Name of the metric used as objective in this experiment.
                This metric must be present in the `raw_data` argument to `complete_trial`.
            name: Name of the experiment to be created.
            minimize: Whether this experiment represents a minimization problem.
            parameter_constraints: List of string representations of parameter
                constraints, such as "x3 >= x4" or "x3 + x4 + x5 >= 2". For sum
                constraints, any number of parameters is accepted, and the
                acceptable operators are "<=" and ">=".
            outcome_constraints: List of string representations of outcome
                constraints of the form "metric_name >= bound", such as "m1 <= 3".
            status_quo: Parameterization of the current state of the system.
                If set, this will be added to each trial to be evaluated alongside
                test configurations.
            overwrite_existing_experiment: If `DBSettings` were provided on
                instantiation and the experiment being created has the same name
                as some experiment already stored, whether to overwrite the
                existing experiment. Defaults to False.
        """
        if self.db_settings and not name:
            raise ValueError(  # pragma: no cover
                "Must give the experiment a name if `db_settings` is not None."
            )
        if self.db_settings:
            existing = None
            try:
                existing, _ = load_experiment_and_generation_strategy(
                    experiment_name=not_none(name),
                    db_settings=self.db_settings)
            except ValueError:  # Experiment does not exist, nothing to do.
                pass
            if existing and overwrite_existing_experiment:
                logger.info(f"Overwriting existing experiment {name}.")
            elif existing:
                raise ValueError(
                    f"Experiment {name} exists; set the `overwrite_existing_"
                    "experiment` to `True` to overwrite with new experiment "
                    "or use `ax_client.load_experiment_from_database` to "
                    "continue an existing experiment.")

        self._experiment = make_experiment(
            name=name,
            parameters=parameters,
            objective_name=objective_name,
            minimize=minimize,
            parameter_constraints=parameter_constraints,
            outcome_constraints=outcome_constraints,
            status_quo=status_quo,
            experiment_type=experiment_type,
        )
        if self._generation_strategy is None:
            self._generation_strategy = choose_generation_strategy(
                search_space=self._experiment.search_space,
                enforce_sequential_optimization=self._enforce_sequential_optimization,
                random_seed=self._random_seed,
            )
        self._save_experiment_and_generation_strategy_to_db_if_possible(
            overwrite_existing_experiment=True)
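A hedged sketch of the two paths the error message above points at when `DBSettings` are configured and an experiment with the given name is already stored; `ax_client` and the experiment name are illustrative, and the assumption is that `load_experiment_from_database` takes the experiment name as referenced in that message.

# Continue the stored experiment...
ax_client.load_experiment_from_database("stored_experiment")
# ...or explicitly opt into overwriting it with a fresh definition.
ax_client.create_experiment(
    name="stored_experiment",
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objective_name="m1",
    overwrite_existing_experiment=True,
)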