def get_best_point(self) -> Tuple[TParameterization, Optional[TModelPredictArm]]:
    """Obtain the best point encountered in the course of this optimization.

    First attempts to use predictions from the model of the latest trial
    that has a generator run attached; if no such predictions are
    available, falls back to the best raw observed objective value.

    Returns:
        Tuple of (best parameterization, model predictions for it), where
        the predictions conform to ``TModelPredictArm``: a tuple of a
        metric-name-to-mean mapping and a metric-name-to-covariance mapping.
    """
    # Find latest trial which has a generator_run attached and get its predictions.
    model_predictions = get_best_from_model_predictions(experiment=self.experiment)
    if model_predictions is not None:
        return model_predictions
    # Could not find through model, default to using raw objective.
    parameterization, values = get_best_raw_objective_point(
        experiment=self.experiment
    )
    # Each value in `values` is a (mean, sem) pair. To conform to
    # TModelPredictArm, report means directly and express each metric's
    # self-covariance as its variance (sem squared).
    return (
        parameterization,
        (
            {k: v[0] for k, v in values.items()},  # v[0] is the mean.
            # v[1] is the sem; sem**2 is the variance (self-covariance).
            {k: {k: v[1] * v[1]} for k, v in values.items()},
        ),
    )
def get_best_parameters(
    self,
) -> Optional[Tuple[TParameterization, Optional[TModelPredictArm]]]:
    """Return the best set of parameters the experiment has knowledge of.

    If the experiment is in the optimization phase, the best point as
    determined by the model used in the latest optimization round is
    returned; otherwise, the best raw observed objective point is used.

    The custom type ``TModelPredictArm`` is defined as
    ``Tuple[Dict[str, float], Optional[Dict[str, Dict[str, float]]]]``: a
    mapping of metric name to mean value, and a mapping of metric name to
    a mapping of metric name to the covariance of the two metrics.

    Returns:
        Tuple of (best parameters, model predictions for best parameters).
        None if no data.
    """
    # Prefer predictions from the latest trial with a generator_run attached.
    predicted = get_best_from_model_predictions(experiment=self.experiment)
    if predicted is not None:  # pragma: no cover
        return predicted
    # Could not find through model, default to using raw objective.
    best_params, metric_values = get_best_raw_objective_point(
        experiment=self.experiment
    )
    # Reshape (mean, sem) pairs into the TModelPredictArm layout.
    means = {}
    covariances = {}
    for metric, v in metric_values.items():
        means[metric] = v[0]  # v[0] is mean
        covariances[metric] = {metric: v[1] * v[1]}  # v[1] is sem
    return best_params, (means, covariances)