def test_validate_batch_log_data():
    """Verify that _validate_batch_log_data rejects malformed entities and accepts valid ones.

    Each bad entity list below violates exactly one constraint (oversized key,
    wrong value type, bad/negative timestamp, bad step, or oversized value);
    every such list must cause an MlflowException when substituted into an
    otherwise-valid batch.
    """
    oversized_key = "super-long-bad-key" * 1000
    # Malformed metric lists, one constraint violation apiece.
    bad_metric_lists = [
        [Metric("good-metric-key", 1.0, 0, 0), Metric(oversized_key, 4.0, 0, 0)],
        [Metric("good-metric-key", "not-a-double-val", 0, 0)],
        [Metric("good-metric-key", 1.0, "not-a-timestamp", 0)],
        [Metric("good-metric-key", 1.0, -123, 0)],
        [Metric("good-metric-key", 1.0, 0, "not-a-step")],
    ]
    # Malformed param lists: oversized key, then oversized value.
    bad_param_lists = [
        [Param("good-param-key", "hi"), Param(oversized_key, "but-good-val")],
        [Param("good-param-key", "hi"), Param("another-good-key", "but-bad-val" * 1000)],
    ]
    # Malformed tag lists: oversized key, then oversized value.
    bad_tag_lists = [
        [RunTag("good-tag-key", "hi"), RunTag(oversized_key, "but-good-val")],
        [RunTag("good-tag-key", "hi"), RunTag("another-good-key", "but-bad-val" * 1000)],
    ]
    bad_kwargs = {
        "metrics": bad_metric_lists,
        "params": bad_param_lists,
        "tags": bad_tag_lists,
    }
    good_kwargs = {"metrics": [], "params": [], "tags": []}
    for field_name, bad_lists in bad_kwargs.items():
        for bad_list in bad_lists:
            # Substitute a single bad entity list into an otherwise-valid batch.
            call_kwargs = copy.deepcopy(good_kwargs)
            call_kwargs[field_name] = bad_list
            with pytest.raises(MlflowException):
                _validate_batch_log_data(**call_kwargs)
    # Entities within the limits must pass validation without raising.
    _validate_batch_log_data(
        metrics=[Metric("metric-key", 1.0, 0, 0)],
        params=[Param("param-key", "param-val")],
        tags=[RunTag("tag-key", "tag-val")],
    )
def log_batch(self, run_id, metrics, params, tags):
    """Log a batch of metrics, params, and tags for an active run.

    Validates the batch contents and limits, verifies the run exists and is
    active, then logs each entity individually via the single-entity APIs.

    :param run_id: ID of the run to log against.
    :param metrics: List of Metric entities to log.
    :param params: List of Param entities to log.
    :param tags: List of RunTag entities to set.
    :raises MlflowException: with the original error code if validation or a
        single-entity call raises one, or with INTERNAL_ERROR for any other
        unexpected failure.
    """
    _validate_run_id(run_id)
    _validate_batch_log_data(metrics, params, tags)
    _validate_batch_log_limits(metrics, params, tags)
    run = self.get_run(run_id)
    check_run_is_active(run.info)
    try:
        for param in params:
            self.log_param(run_id, param)
        for metric in metrics:
            self.log_metric(run_id, metric)
        for tag in tags:
            self.set_tag(run_id, tag)
    except MlflowException as e:
        # Preserve the original MlflowException (and its error code) rather
        # than re-wrapping it as INTERNAL_ERROR — consistent with the other
        # store implementations' log_batch.
        raise e
    except Exception as e:
        raise MlflowException(e, INTERNAL_ERROR)
def log_batch(self, run_id, metrics, params, tags):
    """Log a batch of metrics, params, and tags for an active run.

    Validates the batch contents, limits, and param-key uniqueness, verifies
    the run is active, then logs each entity via the per-entity run helpers.

    :param run_id: ID of the run to log against.
    :param metrics: List of Metric entities to log.
    :param params: List of Param entities to log (keys must be unique).
    :param tags: List of RunTag entities to set.
    :raises MlflowException: with the original error code if validation or a
        per-entity helper raises one, or with INTERNAL_ERROR for any other
        unexpected failure.
    """
    _validate_run_id(run_id)
    _validate_batch_log_data(metrics, params, tags)
    _validate_batch_log_limits(metrics, params, tags)
    _validate_param_keys_unique(params)
    run_info = self._get_run_info(run_id)
    check_run_is_active(run_info)
    try:
        for param in params:
            self._log_run_param(run_info, param)
        for metric in metrics:
            self._log_run_metric(run_info, metric)
        for tag in tags:
            self._set_run_tag(run_info, tag)
    except MlflowException as e:
        # Preserve the original MlflowException (and its error code) rather
        # than re-wrapping it as INTERNAL_ERROR — consistent with the other
        # store implementations' log_batch.
        raise e
    except Exception as e:
        raise MlflowException(e, INTERNAL_ERROR)
def log_batch(self, run_id, metrics, params, tags):
    """Log a batch of metrics, params, and tags against a single run.

    After validating the batch, opens a managed DB session to confirm the run
    exists and is active, then delegates to the single-entity logging APIs
    (metrics are bulk-inserted via log_metrics).

    :param run_id: ID of the run to log against.
    :param metrics: List of Metric entities to log.
    :param params: List of Param entities to log.
    :param tags: List of RunTag entities to set.
    :raises MlflowException: propagated unchanged if raised by validation or
        logging; any other failure is wrapped with INTERNAL_ERROR.
    """
    _validate_run_id(run_id)
    _validate_batch_log_data(metrics, params, tags)
    _validate_batch_log_limits(metrics, params, tags)
    with self.ManagedSessionMaker() as session:
        active_run = self._get_run(run_uuid=run_id, session=session)
        self._check_run_is_active(active_run)
        try:
            for p in params:
                self.log_param(run_id, p)
            self.log_metrics(run_id, metrics)
            for t in tags:
                self.set_tag(run_id, t)
        except MlflowException:
            # Already carries the right error code — let it propagate as-is.
            raise
        except Exception as e:
            raise MlflowException(e, INTERNAL_ERROR)
def log_batch(self, run_id: str, metrics: List[Metric], params: List[Param], tags: List[RunTag]) -> None:
    """Log a batch of metrics, params, and tags for an active run.

    Validates the batch, fetches the run document and checks it is active,
    applies each entity via the per-entity helpers, and persists the run with
    a single save at the end.

    :param run_id: ID of the run to log against.
    :param metrics: List of Metric entities to log.
    :param params: List of Param entities to log.
    :param tags: List of RunTag entities to set.
    :raises MlflowException: propagated unchanged if raised by validation or
        a helper; any other failure is wrapped with INTERNAL_ERROR.
    """
    _validate_run_id(run_id)
    _validate_batch_log_data(metrics, params, tags)
    _validate_batch_log_limits(metrics, params, tags)
    run = self._get_run(run_id=run_id)
    self._check_run_is_active(run)
    try:
        for metric_entity in metrics:
            self._log_metric(run, metric_entity)
        for param_entity in params:
            self._log_param(run, param_entity)
        for tag_entity in tags:
            self._set_tag(run, tag_entity)
        # Persist all accumulated changes in one write.
        run.save()
    except MlflowException:
        # Already carries the right error code — let it propagate as-is.
        raise
    except Exception as e:
        raise MlflowException(e, INTERNAL_ERROR)