def _make_evaluations_and_data(
    self,
    trial: BaseTrial,
    raw_data: Union[TEvaluationOutcome, Dict[str, TEvaluationOutcome]],
    metadata: Optional[Dict[str, Union[str, int]]],
    sample_sizes: Optional[Dict[str, int]] = None,
) -> Tuple[Dict[str, TEvaluationOutcome], Data]:
    """Formats given raw data as Ax evaluations and `Data`.

    Args:
        trial: Trial within the experiment.
        raw_data: Metric outcomes for 1-arm trials, map from arm name to
            metric outcomes for batched trials.
        metadata: Additional metadata to track about this run. If it contains
            `start_time` / `end_time` entries, they are attached to the
            produced `Data`.
        sample_sizes: Map from arm name to integer sample size. Optional.

    Returns:
        Tuple of (map from arm name to evaluation, `Data` for the trial).

    Raises:
        ValueError: If the trial is of an unexpected type, or if `raw_data`
            contains arm names that are not part of the trial.
    """
    if isinstance(trial, BatchTrial):
        # Batched trials carry multiple arms, so outcomes must be keyed by arm.
        assert isinstance(  # pragma: no cover
            raw_data, dict
        ), "Raw data must be a dict for batched trials."
    elif isinstance(trial, Trial):
        # Normalize the 1-arm case to the same dict shape as batched trials.
        arm_name = not_none(trial.arm).name
        raw_data = {arm_name: raw_data}  # pyre-ignore[9]
    else:  # pragma: no cover
        raise ValueError(f"Unexpected trial type: {type(trial)}.")
    assert isinstance(raw_data, dict)
    # Reject data for arms that do not belong to this trial.
    not_trial_arm_names = set(raw_data.keys()) - set(trial.arms_by_name.keys())
    if not_trial_arm_names:
        raise ValueError(
            f"Arms {not_trial_arm_names} are not part of trial #{trial.index}."
        )
    evaluations = {
        arm_name: raw_data_to_evaluation(
            raw_data=raw_data[arm_name], objective_name=self.objective_name
        )
        for arm_name in raw_data
    }
    data = data_from_evaluations(
        evaluations=evaluations,
        trial_index=trial.index,
        sample_sizes=sample_sizes or {},
        start_time=(
            checked_cast_optional(int, metadata.get("start_time"))
            if metadata is not None
            else None
        ),
        end_time=(
            checked_cast_optional(int, metadata.get("end_time"))
            if metadata is not None
            else None
        ),
    )
    return evaluations, data
def complete_trial(
    self,
    trial_index: int,
    raw_data: TEvaluationOutcome,
    metadata: Optional[Dict[str, Union[str, int]]] = None,
    sample_size: Optional[int] = None,
) -> None:
    """
    Completes the trial with given metric values and adds optional metadata
    to it.

    Args:
        trial_index: Index of trial within the experiment.
        raw_data: Evaluation data for the trial. Can be a mapping from
            metric name to a tuple of mean and SEM, just a tuple of mean and
            SEM if only one metric in optimization, or just the mean if there
            is no SEM. Can also be a list of (fidelities, mapping from
            metric name to a tuple of mean and SEM).
        metadata: Additional metadata to track about this run. If it contains
            `start_time` / `end_time` entries, they are attached to the data.
        sample_size: Number of samples collected for the underlying arm,
            optional.
    """
    assert isinstance(
        trial_index, int
    ), f"Trial index must be an int, got: {trial_index}."  # pragma: no cover
    trial = self._get_trial(trial_index=trial_index)
    if metadata is not None:
        trial._run_metadata = metadata

    arm_name = not_none(trial.arm).name
    evaluations = {
        arm_name: raw_data_to_evaluation(
            raw_data=raw_data, objective_name=self.objective_name
        )
    }
    # Check `is not None` (not truthiness) so an explicit sample size of 0
    # is recorded rather than silently dropped.
    sample_sizes = {arm_name: sample_size} if sample_size is not None else {}
    data = data_from_evaluations(
        evaluations=evaluations,
        trial_index=trial.index,
        sample_sizes=sample_sizes,
        start_time=(
            checked_cast_optional(int, metadata.get("start_time"))
            if metadata is not None
            else None
        ),
        end_time=(
            checked_cast_optional(int, metadata.get("end_time"))
            if metadata is not None
            else None
        ),
    )
    # In service API, a trial may be completed multiple times (for multiple
    # metrics, for example).
    trial.mark_completed(allow_repeat_completion=True)
    self.experiment.attach_data(data)
    # Round once for logging; the original passed the already-rounded payload
    # through `_round_floats_for_logging` a second time.
    data_for_logging = _round_floats_for_logging(
        item=evaluations[next(iter(evaluations.keys()))]
    )
    logger.info(f"Completed trial {trial_index} with data: {data_for_logging}.")
    self._updated_trials.append(trial_index)
    self._save_experiment_and_generation_strategy_to_db_if_possible()
def complete_trial(
    self,
    trial_index: int,
    raw_data: TEvaluationOutcome,
    metadata: Optional[Dict[str, str]] = None,
    sample_size: Optional[int] = None,
) -> None:
    """
    Completes the trial with given metric values and adds optional metadata
    to it.

    Args:
        trial_index: Index of trial within the experiment.
        raw_data: Evaluation data for the trial. Can be a mapping from
            metric name to a tuple of mean and SEM, just a tuple of mean and
            SEM if only one metric in optimization, or just the mean if there
            is no SEM. Can also be a list of (fidelities, mapping from
            metric name to a tuple of mean and SEM).
        metadata: Additional metadata to track about this run.
        sample_size: Number of samples collected for the underlying arm,
            optional.

    Raises:
        NotImplementedError: If the trial at `trial_index` is a `BatchTrial`;
            only single-arm `Trial` objects are supported here.
    """
    assert isinstance(
        trial_index, int
    ), f"Trial index must be an int, got: {trial_index}."  # pragma: no cover
    trial = self.experiment.trials[trial_index]
    if not isinstance(trial, Trial):
        raise NotImplementedError(
            "The Service API only supports `Trial`, not `BatchTrial`."
        )
    if metadata is not None:
        trial._run_metadata = metadata

    arm_name = not_none(trial.arm).name
    evaluations = {
        arm_name: raw_data_to_evaluation(
            raw_data=raw_data, objective_name=self.objective_name
        )
    }
    # Check `is not None` (not truthiness) so an explicit sample size of 0
    # is recorded rather than silently dropped.
    sample_sizes = {arm_name: sample_size} if sample_size is not None else {}
    data = data_from_evaluations(
        evaluations=evaluations,
        trial_index=trial.index,
        sample_sizes=sample_sizes,
    )
    # In service API, a trial may be completed multiple times (for multiple
    # metrics, for example).
    trial.mark_completed(allow_repeat_completion=True)
    self.experiment.attach_data(data)
    self._updated_trials.append(trial_index)
    self._save_experiment_and_generation_strategy_to_db_if_possible()