Example #1
 def test_raw_data_is_not_dict_of_dicts(self):
     with self.assertRaises(ValueError):
         raw_data_to_evaluation(
             raw_data={"arm_0": {
                 "objective_a": 6
             }},
             metric_names=["objective_a"],
         )
Example #2
 def test_it_turns_a_tuple_into_a_dict(self):
     raw_data = (1.4, None)
     result = raw_data_to_evaluation(
         raw_data=raw_data,
         metric_names=["objective_a"],
     )
     self.assertEqual(result["objective_a"], raw_data)
Example #3
 def test_it_accepts_a_list_for_single_objectives(self):
     raw_data = [({"arm__0": {}}, {"objective_a": (1.4, None)})]
     result = raw_data_to_evaluation(
         raw_data=raw_data,
         metric_names=["objective_a"],
     )
     self.assertEqual(raw_data, result)
Example #4
    def complete_trial(
        self,
        trial_index: int,
        raw_data: TEvaluationOutcome,
        metadata: Optional[Dict[str, str]] = None,
        sample_size: Optional[int] = None,
    ) -> None:
        """
        Completes the trial with given metric values and adds optional metadata
        to it.

        Args:
            trial_index: Index of trial within the experiment.
            raw_data: Evaluation data for the trial. Can be a mapping from
                metric name to a tuple of mean and SEM, just a tuple of mean and
                SEM if only one metric in optimization, or just the mean if there
                is no SEM.  Can also be a list of (fidelities, mapping from
                metric name to a tuple of mean and SEM).
            metadata: Additional metadata to track about this run.
            sample_size: Number of samples collected for the underlying arm,
                optional.
        """
        assert isinstance(
            trial_index, int
        ), f"Trial index must be an int, got: {trial_index}."  # pragma: no cover
        trial = self.experiment.trials[trial_index]
        if not isinstance(trial, Trial):
            raise NotImplementedError(
                "Batch trial functionality is not yet available through Service API."
            )

        if metadata is not None:
            trial._run_metadata = metadata

        arm_name = not_none(trial.arm).name
        objective_name = self.experiment.optimization_config.objective.metric.name
        evaluations = {
            arm_name: raw_data_to_evaluation(
                raw_data=raw_data, objective_name=objective_name
            )
        }
        sample_sizes = {arm_name: sample_size} if sample_size else {}
        # evaluations[arm_name] is either a trial evaluation
        # {metric_name -> (mean, SEM)} or a fidelity trial evaluation
        # [(fidelities, {metric_name -> (mean, SEM)})]
        if isinstance(evaluations[arm_name], dict):
            data = Data.from_evaluations(
                evaluations=cast(Dict[str, TTrialEvaluation], evaluations),
                trial_index=trial.index,
                sample_sizes=sample_sizes,
            )
        else:
            data = Data.from_fidelity_evaluations(
                evaluations=cast(
                    Dict[str, TFidelityTrialEvaluation], evaluations
                ),
                trial_index=trial.index,
                sample_sizes=sample_sizes,
            )
        trial.mark_completed()
        self.experiment.attach_data(data)
        self._updated_trials.append(trial_index)
        self._save_experiment_and_generation_strategy_if_possible()
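
For reference, a minimal usage sketch of the `raw_data` shapes the `complete_trial` docstring above describes. The client setup, trial indices, metric names, and fidelity parameters are illustrative assumptions, not part of the snippet.

from ax.service.ax_client import AxClient


def report_results(client: AxClient) -> None:
    # Illustrative only: trials 0-3 are assumed to already be running.
    # 1) Mapping from metric name to (mean, SEM).
    client.complete_trial(trial_index=0, raw_data={"objective_a": (1.4, 0.1)})
    # 2) Single-metric shorthand: just (mean, SEM).
    client.complete_trial(trial_index=1, raw_data=(1.4, 0.1))
    # 3) Bare mean when no SEM is available.
    client.complete_trial(trial_index=2, raw_data=1.4)
    # 4) Fidelity form: list of (fidelity parameters, metric -> (mean, SEM)).
    client.complete_trial(
        trial_index=3,
        raw_data=[({"epochs": 10}, {"objective_a": (1.4, 0.1)})],
    )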
Example #5
    def _make_evaluations_and_data(
        self,
        trial: BaseTrial,
        raw_data: Union[TEvaluationOutcome, Dict[str, TEvaluationOutcome]],
        metadata: Optional[Dict[str, Union[str, int]]],
        sample_sizes: Optional[Dict[str, int]] = None,
    ) -> Tuple[Dict[str, TEvaluationOutcome], Data]:
        """Formats given raw data as Ax evaluations and `Data`.

        Args:
            trial: Trial within the experiment.
            raw_data: Metric outcomes for 1-arm trials, map from arm name to
                metric outcomes for batched trials.
            metadata: Additional metadata to track about this run; may include
                integer "start_time" and "end_time" entries.
            sample_sizes: Dict from arm name to sample size. Optional.

        Returns:
            Tuple of the per-arm evaluations and the corresponding Ax `Data`.
        """
        if isinstance(trial, BatchTrial):
            assert isinstance(  # pragma: no cover
                raw_data, dict
            ), "Raw data must be a dict for batched trials."
        elif isinstance(trial, Trial):
            arm_name = not_none(trial.arm).name
            raw_data = {arm_name: raw_data}  # pyre-ignore[9]
        else:  # pragma: no cover
            raise ValueError(f"Unexpected trial type: {type(trial)}.")
        assert isinstance(raw_data, dict)
        not_trial_arm_names = set(raw_data.keys()) - set(trial.arms_by_name.keys())
        if not_trial_arm_names:
            raise ValueError(
                f"Arms {not_trial_arm_names} are not part of trial #{trial.index}."
            )
        evaluations = {
            arm_name: raw_data_to_evaluation(
                raw_data=raw_data[arm_name], objective_name=self.objective_name
            )
            for arm_name in raw_data
        }
        data = data_from_evaluations(
            evaluations=evaluations,
            trial_index=trial.index,
            sample_sizes=sample_sizes or {},
            start_time=(
                checked_cast_optional(int, metadata.get("start_time"))
                if metadata is not None
                else None
            ),
            end_time=(
                checked_cast_optional(int, metadata.get("end_time"))
                if metadata is not None
                else None
            ),
        )
        return evaluations, data
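
The branching at the top of `_make_evaluations_and_data` is a per-arm normalization: a 1-arm trial gets its raw data keyed under its only arm, a batched trial must already supply a dict keyed by arm name, and any arm name that is not part of the trial is rejected. A standalone sketch of that normalization, with illustrative names that are not the Ax API:

from typing import Any, Dict, List, Union


def normalize_raw_data(
    raw_data: Union[Any, Dict[str, Any]],
    trial_arm_names: List[str],
    is_batched: bool,
) -> Dict[str, Any]:
    # Sketch of the per-arm normalization performed above.
    if is_batched:
        if not isinstance(raw_data, dict):
            raise ValueError("Raw data must be a dict for batched trials.")
        per_arm = raw_data
    else:
        # 1-arm trial: key the single outcome under the trial's only arm.
        per_arm = {trial_arm_names[0]: raw_data}
    unknown = set(per_arm) - set(trial_arm_names)
    if unknown:
        raise ValueError(f"Arms {unknown} are not part of this trial.")
    return per_arm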
Example #6
    def complete_trial(
        self,
        trial_index: int,
        raw_data: TEvaluationOutcome,
        metadata: Optional[Dict[str, Union[str, int]]] = None,
        sample_size: Optional[int] = None,
    ) -> None:
        """
        Completes the trial with given metric values and adds optional metadata
        to it.

        Args:
            trial_index: Index of trial within the experiment.
            raw_data: Evaluation data for the trial. Can be a mapping from
                metric name to a tuple of mean and SEM, just a tuple of mean and
                SEM if only one metric in optimization, or just the mean if there
                is no SEM.  Can also be a list of (fidelities, mapping from
                metric name to a tuple of mean and SEM).
            metadata: Additional metadata to track about this run.
            sample_size: Number of samples collected for the underlying arm,
                optional.
        """
        assert isinstance(
            trial_index, int
        ), f"Trial index must be an int, got: {trial_index}."  # pragma: no cover
        trial = self._get_trial(trial_index=trial_index)
        if metadata is not None:
            trial._run_metadata = metadata

        arm_name = not_none(trial.arm).name
        evaluations = {
            arm_name: raw_data_to_evaluation(
                raw_data=raw_data, objective_name=self.objective_name
            )
        }
        sample_sizes = {arm_name: sample_size} if sample_size else {}
        data = data_from_evaluations(
            evaluations=evaluations,
            trial_index=trial.index,
            sample_sizes=sample_sizes,
            start_time=(
                checked_cast_optional(int, metadata.get("start_time"))
                if metadata is not None
                else None
            ),
            end_time=(
                checked_cast_optional(int, metadata.get("end_time"))
                if metadata is not None
                else None
            ),
        )
        # In service API, a trial may be completed multiple times (for multiple
        # metrics, for example).
        trial.mark_completed(allow_repeat_completion=True)
        self.experiment.attach_data(data)
        data_for_logging = _round_floats_for_logging(
            item=evaluations[next(iter(evaluations.keys()))]
        )
        logger.info(
            f"Completed trial {trial_index} with data: {data_for_logging}."
        )
        self._updated_trials.append(trial_index)
        self._save_experiment_and_generation_strategy_to_db_if_possible()
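
Because this variant pulls optional start_time and end_time entries out of `metadata`, a caller can attach timing information along with the outcome. A hedged usage sketch; the timestamps, metric values, and client setup are assumptions:

import time

from ax.service.ax_client import AxClient


def report_timed_result(client: AxClient, trial_index: int) -> None:
    # start_time/end_time are plain integer timestamps; complete_trial
    # forwards them to data_from_evaluations via the metadata dict.
    start = int(time.time())
    mean, sem = 1.4, 0.1  # illustrative evaluation result
    end = int(time.time())
    client.complete_trial(
        trial_index=trial_index,
        raw_data=(mean, sem),
        metadata={"start_time": start, "end_time": end},
    )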
Example #7
 def test_it_converts_to_floats_in_dict_and_leaves_tuples(self):
     result = raw_data_to_evaluation(
         raw_data={
             "objective_a": 6,
             "objective_b": 1.0,
             "objective_c": ("some", "tuple"),
         },
         metric_names=["objective_a", "objective_b"],
     )
     self.assertEqual(result["objective_a"], (6.0, None))
     self.assertEqual(result["objective_b"], (1.0, None))
     self.assertEqual(result["objective_c"], ("some", "tuple"))
Example #8
    def complete_trial(
        self,
        trial_index: int,
        raw_data: TEvaluationOutcome,
        metadata: Optional[Dict[str, str]] = None,
        sample_size: Optional[int] = None,
    ) -> None:
        """
        Completes the trial with given metric values and adds optional metadata
        to it.

        Args:
            trial_index: Index of trial within the experiment.
            raw_data: Evaluation data for the trial. Can be a mapping from
                metric name to a tuple of mean and SEM, just a tuple of mean and
                SEM if only one metric in optimization, or just the mean if there
                is no SEM.  Can also be a list of (fidelities, mapping from
                metric name to a tuple of mean and SEM).
            metadata: Additional metadata to track about this run.
            sample_size: Number of samples collected for the underlying arm,
                optional.
        """
        assert isinstance(
            trial_index, int
        ), f"Trial index must be an int, got: {trial_index}."  # pragma: no cover
        trial = self.experiment.trials[trial_index]
        if not isinstance(trial, Trial):
            raise NotImplementedError(
                "The Service API only supports `Trial`, not `BatchTrial`.")

        if metadata is not None:
            trial._run_metadata = metadata

        arm_name = not_none(trial.arm).name
        evaluations = {
            arm_name: raw_data_to_evaluation(
                raw_data=raw_data, objective_name=self.objective_name
            )
        }
        sample_sizes = {arm_name: sample_size} if sample_size else {}
        data = data_from_evaluations(
            evaluations=evaluations,
            trial_index=trial.index,
            sample_sizes=sample_sizes,
        )
        # In service API, a trial may be completed multiple times (for multiple
        # metrics, for example).
        trial.mark_completed(allow_repeat_completion=True)
        self.experiment.attach_data(data)
        self._updated_trials.append(trial_index)
        self._save_experiment_and_generation_strategy_to_db_if_possible()
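
The comment above notes that, in the Service API, a trial may be completed more than once, e.g. when results for different metrics arrive separately; `mark_completed(allow_repeat_completion=True)` allows an already-completed trial to be marked completed again without raising. A sketch of that pattern, with illustrative metric names:

from ax.service.ax_client import AxClient


def report_metrics_separately(client: AxClient, trial_index: int) -> None:
    # Results for different metrics are attached in separate calls; each call
    # attaches its own Data to the experiment for the same trial.
    client.complete_trial(
        trial_index=trial_index, raw_data={"objective_a": (1.4, 0.1)}
    )
    client.complete_trial(
        trial_index=trial_index, raw_data={"latency_ms": (120.0, None)}
    )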
Example #9
 def test_it_raises_for_unexpected_types(self):
     with self.assertRaises(ValueError):
         raw_data_to_evaluation(
             raw_data="1.6",
             metric_names=["objective_a"],
         )
Example #10
 def test_it_turns_a_float_into_a_dict_of_tuple(self):
     result = raw_data_to_evaluation(
         raw_data=1.6,
         metric_names=["objective_a"],
     )
     self.assertEqual(result["objective_a"], (1.6, None))
Example #11
 def test_it_requires_a_dict_for_multi_objectives(self):
     with self.assertRaises(ValueError):
         raw_data_to_evaluation(
             raw_data=(6.0, None),
             metric_names=["objective_a", "objective_b"],
         )
Example #12
 def test_dict_entries_must_be_int_float_or_tuple(self):
     with self.assertRaises(ValueError):
         raw_data_to_evaluation(
             raw_data={"objective_a": [6.0, None]},
             metric_names=["objective_a"],
         )
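
Taken together, the tests pin down the conversion rules of `raw_data_to_evaluation`: a bare number becomes a {metric: (mean, None)} dict, a (mean, SEM) tuple is keyed under the single metric name, dict values must be ints, floats, or tuples, more than one metric requires a dict, the fidelity-list form passes through unchanged, and anything else raises ValueError. A simplified sketch consistent with that behavior; it is not the actual Ax implementation:

from typing import Any, List, Union


def to_evaluation_sketch(raw_data: Any, metric_names: List[str]) -> Union[dict, list]:
    # Simplified restatement of the behavior the tests above check.
    if isinstance(raw_data, dict):
        if not all(isinstance(v, (int, float, tuple)) for v in raw_data.values()):
            raise ValueError("Dict values must be int, float, or tuple.")
        # Bare numbers become (mean, None); tuples are left untouched.
        return {
            name: value if isinstance(value, tuple) else (float(value), None)
            for name, value in raw_data.items()
        }
    if isinstance(raw_data, list):
        # Fidelity form: list of (fidelity params, metric -> (mean, SEM)).
        return raw_data
    if len(metric_names) > 1:
        raise ValueError("Multi-metric evaluations must be a dict keyed by metric name.")
    if isinstance(raw_data, tuple):
        return {metric_names[0]: raw_data}
    if isinstance(raw_data, (int, float)):
        return {metric_names[0]: (float(raw_data), None)}
    raise ValueError(f"Unexpected raw data type: {type(raw_data)}.")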