def test_validate_batch_log_limits():
    """Verify that _validate_batch_log_limits enforces per-type and aggregate caps.

    The limits under test are 1000 metrics, 100 params, and 100 tags per batch,
    plus an aggregate cap across all three entity types.
    """
    oversized_metrics = [Metric("metric-key-%s" % i, 1, 0, i * 2) for i in range(1001)]
    oversized_params = [Param("param-key-%s" % i, "b") for i in range(101)]
    oversized_tags = [RunTag("tag-key-%s" % i, "b") for i in range(101)]
    base_kwargs = {"metrics": [], "params": [], "tags": []}
    oversized_by_field = {
        "metrics": oversized_metrics,
        "params": oversized_params,
        "tags": oversized_tags,
    }
    # One entity type over its individual cap must be rejected, regardless of
    # which type it is.
    for field_name, oversized_value in oversized_by_field.items():
        kwargs = dict(base_kwargs, **{field_name: oversized_value})
        with pytest.raises(MlflowException):
            _validate_batch_log_limits(**kwargs)
    # Each type within its own cap, but the aggregate total over the limit,
    # must also be rejected.
    with pytest.raises(MlflowException):
        _validate_batch_log_limits(
            oversized_metrics[:900], oversized_params[:51], oversized_tags[:50]
        )
    # Counts exactly at the per-type limits are accepted.
    _validate_batch_log_limits(oversized_metrics[:1000], [], [])
    _validate_batch_log_limits([], oversized_params[:100], [])
    _validate_batch_log_limits([], [], oversized_tags[:100])
def log_batch(self, run_id, metrics, params, tags):
    """Log a batch of metrics, params, and tags against the given run.

    Args:
        run_id: ID of the run to log under; must refer to an active run.
        metrics: List of Metric entities.
        params: List of Param entities.
        tags: List of RunTag entities.

    Raises:
        MlflowException: If validation fails, the run is not active, or an
            unexpected error occurs while logging (reported as INTERNAL_ERROR).
    """
    _validate_run_id(run_id)
    _validate_batch_log_data(metrics, params, tags)
    _validate_batch_log_limits(metrics, params, tags)
    run = self.get_run(run_id)
    check_run_is_active(run.info)
    try:
        for param in params:
            self.log_param(run_id, param)
        for metric in metrics:
            self.log_metric(run_id, metric)
        for tag in tags:
            self.set_tag(run_id, tag)
    except MlflowException:
        # Re-raise unchanged so the original error code is preserved instead of
        # being masked as INTERNAL_ERROR — consistent with the other store
        # implementations in this file.
        raise
    except Exception as e:
        raise MlflowException(e, INTERNAL_ERROR)
def log_batch(self, run_id, metrics, params, tags):
    """Log a batch of metrics, params, and tags against the given run.

    Params are additionally checked for duplicate keys within the batch.

    Args:
        run_id: ID of the run to log under; must refer to an active run.
        metrics: List of Metric entities.
        params: List of Param entities (keys must be unique within the batch).
        tags: List of RunTag entities.

    Raises:
        MlflowException: If validation fails, the run is not active, or an
            unexpected error occurs while logging (reported as INTERNAL_ERROR).
    """
    _validate_run_id(run_id)
    _validate_batch_log_data(metrics, params, tags)
    _validate_batch_log_limits(metrics, params, tags)
    _validate_param_keys_unique(params)
    run_info = self._get_run_info(run_id)
    check_run_is_active(run_info)
    try:
        for param in params:
            self._log_run_param(run_info, param)
        for metric in metrics:
            self._log_run_metric(run_info, metric)
        for tag in tags:
            self._set_run_tag(run_info, tag)
    except MlflowException:
        # Re-raise unchanged so the original error code is preserved instead of
        # being masked as INTERNAL_ERROR — consistent with the other store
        # implementations in this file.
        raise
    except Exception as e:
        raise MlflowException(e, INTERNAL_ERROR)
def log_batch(self, run_id, metrics, params, tags):
    """Validate and persist a batch of metrics, params, and tags for a run.

    Metrics are written in bulk via log_metrics; params and tags are written
    one at a time.

    Raises:
        MlflowException: On validation failure, inactive run, or any
            unexpected error (reported as INTERNAL_ERROR).
    """
    _validate_run_id(run_id)
    _validate_batch_log_data(metrics, params, tags)
    _validate_batch_log_limits(metrics, params, tags)
    with self.ManagedSessionMaker() as session:
        active_run = self._get_run(run_uuid=run_id, session=session)
        self._check_run_is_active(active_run)
        try:
            for param in params:
                self.log_param(run_id, param)
            self.log_metrics(run_id, metrics)
            for tag in tags:
                self.set_tag(run_id, tag)
        except MlflowException as e:
            # Already carries an appropriate error code; re-raise untouched.
            raise e
        except Exception as e:
            raise MlflowException(e, INTERNAL_ERROR)
def log_batch(self, run_id: str, metrics: List[Metric], params: List[Param], tags: List[RunTag]) -> None:
    """Validate and store a batch of metrics, params, and tags on a run.

    All entities are applied to the run object first, then persisted with a
    single save() call.

    Raises:
        MlflowException: On validation failure, inactive run, or any
            unexpected error (reported as INTERNAL_ERROR).
    """
    _validate_run_id(run_id)
    _validate_batch_log_data(metrics, params, tags)
    _validate_batch_log_limits(metrics, params, tags)
    run = self._get_run(run_id=run_id)
    self._check_run_is_active(run)
    try:
        for one_metric in metrics:
            self._log_metric(run, one_metric)
        for one_param in params:
            self._log_param(run, one_param)
        for one_tag in tags:
            self._set_tag(run, one_tag)
        run.save()
    except MlflowException as e:
        # Already carries an appropriate error code; re-raise untouched.
        raise e
    except Exception as e:
        raise MlflowException(e, INTERNAL_ERROR)