Example #1
File: test_data.py  Project: xiecong/Ax
 def testFromFidelityEvaluations(self):
     data = Data.from_fidelity_evaluations(
         evaluations={
             "0_1": [
                 ({"f1": 1.0, "f2": 0.5}, {"b": (3.7, 0.5)}),
                 ({"f1": 1.0, "f2": 0.75}, {"b": (3.8, 0.5)}),
             ]
         },
         trial_index=0,
         sample_sizes={"0_1": 2},
         start_time=current_timestamp_in_millis(),
         end_time=current_timestamp_in_millis(),
     )
     self.assertEqual(len(data.df), 2)
     self.assertIn("start_time", data.df)
     self.assertIn("end_time", data.df)
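For contrast with the fidelity-based constructor exercised above, here is a minimal sketch of the plain Data.from_evaluations form referenced in the later examples, where each arm maps directly to {metric_name -> (mean, SEM)}. The import path, arm name, and metric names are assumptions for illustration, not taken from this test.

from ax.core.data import Data  # assumed import path

# One arm with two metrics, each reported as (mean, SEM); no fidelities.
data = Data.from_evaluations(
    evaluations={"0_0": {"a": (1.0, 0.1), "b": (3.7, 0.5)}},
    trial_index=0,
    sample_sizes={"0_0": 10},
)
print(len(data.df))  # expected: 2, one row per (arm, metric) pair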
Example #2
    def complete_trial(
        self,
        trial_index: int,
        raw_data: TEvaluationOutcome,
        metadata: Optional[Dict[str, str]] = None,
        sample_size: Optional[int] = None,
    ) -> None:
        """
        Completes the trial with given metric values and adds optional metadata
        to it.

        Args:
            trial_index: Index of trial within the experiment.
            raw_data: Evaluation data for the trial. Can be a mapping from
                metric name to a tuple of mean and SEM, just a tuple of mean and
                SEM if only one metric in optimization, or just the mean if there
                is no SEM.  Can also be a list of (fidelities, mapping from
                metric name to a tuple of mean and SEM).
            metadata: Additional metadata to track about this run.
            sample_size: Number of samples collected for the arm, optional.
        """
        assert isinstance(
            trial_index, int
        ), f"Trial index must be an int, got: {trial_index}."  # pragma: no cover
        trial = self.experiment.trials[trial_index]
        if not isinstance(trial, Trial):
            raise NotImplementedError(
                "Batch trial functionality is not yet available through Service API."
            )

        if metadata is not None:
            trial._run_metadata = metadata

        arm_name = not_none(trial.arm).name
        objective_name = self.experiment.optimization_config.objective.metric.name
        evaluations = {
            arm_name: raw_data_to_evaluation(
                raw_data=raw_data, objective_name=objective_name
            )
        }
        sample_sizes = {arm_name: sample_size} if sample_size else {}
        # evaluations[arm_name] is either a trial evaluation
        # {metric_name -> (mean, SEM)} or a fidelity trial evaluation
        # [(fidelities, {metric_name -> (mean, SEM)})]
        if isinstance(evaluations[arm_name], dict):
            data = Data.from_evaluations(
                evaluations=cast(Dict[str, TTrialEvaluation], evaluations),
                trial_index=trial.index,
                sample_sizes=sample_sizes,
            )
        else:
            data = Data.from_fidelity_evaluations(
                evaluations=cast(
                    Dict[str, TFidelityTrialEvaluation], evaluations
                ),
                trial_index=trial.index,
                sample_sizes=sample_sizes,
            )
        trial.mark_completed()
        self.experiment.attach_data(data)
        self._updated_trials.append(trial_index)
        self._save_experiment_and_generation_strategy_if_possible()
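A hedged usage sketch of complete_trial through the Service API. The AxClient setup calls and parameter values below are assumptions for illustration; only the raw_data shapes follow the docstring above (a {metric_name -> (mean, SEM)} mapping for a plain trial, or a list of (fidelities, {metric_name -> (mean, SEM)}) tuples for a fidelity trial).

from ax.service.ax_client import AxClient  # assumed import path

ax_client = AxClient()
ax_client.create_experiment(
    name="demo",  # hypothetical experiment
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objective_name="b",
)

# Plain trial evaluation: {metric_name -> (mean, SEM)}.
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(trial_index=trial_index, raw_data={"b": (3.7, 0.5)})

# Fidelity trial evaluation: [(fidelities, {metric_name -> (mean, SEM)})].
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
    trial_index=trial_index,
    raw_data=[({"f1": 1.0}, {"b": (3.8, 0.5)})],
    sample_size=2,
)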
Example #3
File: test_data.py  Project: jlin27/Ax
 def testFromFidelityEvaluations(self):
     data = Data.from_fidelity_evaluations(
         evaluations={
             "0_1": [
                 ({"f1": 1.0, "f2": 0.5}, {"b": (3.7, 0.5)}),
                 ({"f1": 1.0, "f2": 0.75}, {"b": (3.8, 0.5)}),
             ]
         },
         trial_index=0,
         sample_sizes={"0_1": 2},
     )
     self.assertEqual(len(data.df), 2)
Example #4
def data_from_evaluations(
    evaluations: Dict[str, TEvaluationOutcome],
    trial_index: int,
    sample_sizes: Dict[str, int],
    start_time: Optional[int] = None,
    end_time: Optional[int] = None,
) -> Data:
    """Transforms evaluations into Ax Data.

    Each evaluation is either a trial evaluation: {metric_name -> (mean, SEM)}
    or a fidelity trial evaluation for multi-fidelity optimizations:
    [(fidelities, {metric_name -> (mean, SEM)})].

    Args:
        evaluations: Mapping from arm name to evaluation.
        trial_index: Index of the trial for which the evaluations were collected.
        sample_sizes: Number of samples collected for each arm, may be empty
            if unavailable.
        start_time: Optional start time of run of the trial that produced this
            data, in milliseconds.
        end_time: Optional end time of run of the trial that produced this
            data, in milliseconds.
    """
    if all(isinstance(evaluations[x], dict) for x in evaluations.keys()):
        # All evaluations are no-fidelity evaluations.
        data = Data.from_evaluations(
            evaluations=cast(Dict[str, TTrialEvaluation], evaluations),
            trial_index=trial_index,
            sample_sizes=sample_sizes,
            start_time=start_time,
            end_time=end_time,
        )
    elif all(isinstance(evaluations[x], list) for x in evaluations.keys()):
        # All evaluations are with-fidelity evaluations.
        data = Data.from_fidelity_evaluations(
            evaluations=cast(Dict[str, TFidelityTrialEvaluation], evaluations),
            trial_index=trial_index,
            sample_sizes=sample_sizes,
            start_time=start_time,
            end_time=end_time,
        )
    else:
        raise ValueError(  # pragma: no cover
            "Evaluations included a mixture of no-fidelity and with-fidelity "
            "evaluations, which is not currently supported."
        )
    return data
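A minimal usage sketch of data_from_evaluations, assuming the helper and its imports above are in scope. The arm and metric names are hypothetical; the evaluation values follow the two supported shapes described in the docstring, and mixing those shapes in a single call hits the ValueError branch.

# All arms use the no-fidelity shape: {metric_name -> (mean, SEM)}.
data = data_from_evaluations(
    evaluations={"0_0": {"b": (3.7, 0.5)}, "0_1": {"b": (3.8, 0.5)}},
    trial_index=0,
    sample_sizes={"0_0": 5, "0_1": 5},
)
print(len(data.df))  # expected: 2, one row per (arm, metric) pair

# Mixing no-fidelity and with-fidelity evaluations in one call is rejected.
try:
    data_from_evaluations(
        evaluations={
            "0_0": {"b": (3.7, 0.5)},
            "0_1": [({"f1": 1.0}, {"b": (3.8, 0.5)})],
        },
        trial_index=0,
        sample_sizes={},
    )
except ValueError:
    pass  # mixture of evaluation shapes is not currently supported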