def log_batch(self, run_id, metrics=(), params=(), tags=()):
    """
    Log multiple metrics, params, and/or tags.

    :param run_id: String ID of the run.
    :param metrics: If provided, List of Metric(key, value, timestamp, step) instances.
    :param params: If provided, List of Param(key, value) instances.
    :param tags: If provided, List of RunTag(key, value) instances.

    Raises an MlflowException if any errors occur.
    :return: None
    """
    # Nothing to log -> skip the store round-trip entirely.
    if not metrics and not params and not tags:
        return
    # Validate the whole batch up-front so one bad entry fails the batch
    # before anything is written to the store.
    for metric in metrics:
        _validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
    for param in params:
        _validate_param_name(param.key)
    for tag in tags:
        _validate_tag_name(tag.key)
    self.store.log_batch(run_id=run_id, metrics=metrics, params=params, tags=tags)
def log_metric(self, run_id, metric):
    """Persist one metric row for ``run_id``, sanitizing NaN/Inf for SQL storage."""
    _validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
    raw = metric.value
    is_nan = math.isnan(raw)
    if is_nan:
        # NaN cannot be stored as-is; persist 0 and record the NaN flag.
        stored_value = 0
    elif math.isinf(raw):
        # NB: Sql can not represent Infs = > We replace +/- Inf with max/min 64b float value
        stored_value = 1.7976931348623157e308 if raw > 0 else -1.7976931348623157e308
    else:
        stored_value = raw
    with self.ManagedSessionMaker() as session:
        run = self._get_run(run_uuid=run_id, session=session)
        self._check_run_is_active(run)
        # ToDo: Consider prior checks for null, type, metric name validations, ... etc.
        logged_metric, just_created = self._get_or_create(
            model=SqlMetric,
            run_uuid=run_id,
            key=metric.key,
            value=stored_value,
            timestamp=metric.timestamp,
            step=metric.step,
            session=session,
            is_nan=is_nan,
        )
        # Conditionally update the ``latest_metrics`` table only when a brand-new
        # row was inserted into ``metrics``; an already-present row is assumed to
        # be reflected in ``latest_metrics`` already.
        if just_created:
            self._update_latest_metric_if_necessary(logged_metric, session)
def log_metric(self, run_id, metric):
    """Validate and persist a single metric row for the given run."""
    _validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
    with self.ManagedSessionMaker() as session:
        active_run = self._get_run(run_uuid=run_id, session=session)
        self._check_run_is_active(active_run)
        # ToDo: Consider prior checks for null, type, metric name validations, ... etc.
        self._get_or_create(
            model=SqlMetric,
            run_uuid=run_id,
            key=metric.key,
            value=metric.value,
            timestamp=metric.timestamp,
            step=metric.step,
            session=session,
        )
def log_metric(self, run_id, key, value, timestamp=None):
    """
    Log a metric against the run ID. If timestamp is not provided, uses the
    current timestamp.
    """
    if timestamp is None:
        timestamp = int(time.time())
    _validate_metric(key, value, timestamp)
    self.store.log_metric(run_id, Metric(key, value, timestamp))
def log_metric(self, run_id, key, value, timestamp=None, step=None):
    """
    Log a metric against the run ID. The timestamp defaults to the current
    timestamp. The step defaults to 0.
    """
    if timestamp is None:
        timestamp = int(time.time())
    if step is None:
        step = 0
    _validate_metric(key, value, timestamp, step)
    self.store.log_metric(run_id, Metric(key, value, timestamp, step))
def log_metric(self, run_id, key, value, timestamp=None, step=None):
    """
    Log a metric against the run ID. If timestamp is not provided, uses the
    current timestamp. The metric's step defaults to 0 if unspecified.
    """
    ts = int(time.time()) if timestamp is None else timestamp
    effective_step = 0 if step is None else step
    _validate_metric(key, value, ts, effective_step)
    self.store.log_metric(run_id, Metric(key, value, ts, effective_step))
def log_metric(self, run_id, key, value, timestamp=None, step=None):
    """
    Log a metric against the run ID.

    :param run_id: The run id to which the metric should be logged.
    :param key: Metric name.
    :param value: Metric value (float). Note that some special values such as
                  +/- Infinity may be replaced by other values depending on
                  the store (e.g. the SQLAlchemy store replaces +/- Inf with
                  max / min float values).
    :param timestamp: Time when this metric was calculated. Defaults to the
                      current system time.
    :param step: Training step (iteration) at which was the metric calculated.
                 Defaults to 0.
    """
    if timestamp is None:
        timestamp = int(time.time())
    if step is None:
        step = 0
    _validate_metric(key, value, timestamp, step)
    self.store.log_metric(run_id, Metric(key, value, timestamp, step))
def log_batch(self, run_id, metrics, params, tags):
    """
    Log multiple metrics, params, and/or tags.

    :param run_id: String ID of the run.
    :param metrics: List of Metric(key, value, timestamp) instances.
    :param params: List of Param(key, value) instances.
    :param tags: List of RunTag(key, value) instances.

    Raises an MlflowException if any errors occur.
    :returns: None
    """
    # Validate the entire batch before delegating to the backing store.
    for m in metrics:
        _validate_metric(m.key, m.value, m.timestamp)
    for p in params:
        _validate_param_name(p.key)
    for t in tags:
        _validate_tag_name(t.key)
    self.store.log_batch(run_id=run_id, metrics=metrics, params=params, tags=tags)
def _log_metric(self, run: ElasticRun, metric: Metric) -> None:
    """Persist one metric document for ``run``, sanitizing NaN/Inf values first."""
    _validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
    raw_value = metric.value
    is_nan = math.isnan(raw_value)
    if is_nan:
        # NaN is stored as 0.0 with the ``is_nan`` flag set.
        stored = 0.
    elif math.isinf(raw_value):
        # +/- Inf is replaced with the max/min representable 64-bit float.
        stored = 1.7976931348623157e308 if raw_value > 0 else -1.7976931348623157e308
    else:
        stored = raw_value
    new_metric = ElasticMetric(
        key=metric.key,
        value=stored,
        timestamp=metric.timestamp,
        step=metric.step,
        is_nan=is_nan,
        run_id=run.run_id,
    )
    # Refresh the run's latest-metric bookkeeping before saving the document.
    self._update_latest_metric_if_necessary(new_metric, run)
    new_metric.save()
def log_metric(self, run_id, key, value, timestamp=None, step=None):
    """
    Log a metric against the run ID.

    :param run_id: The run id to which the metric should be logged.
    :param key: Metric name (string). This string may only contain
                alphanumerics, underscores (_), dashes (-), periods (.),
                spaces ( ), and slashes (/). All backend stores will support
                keys up to length 250, but some may support larger keys.
    :param value: Metric value (float). Note that some special values such
                  as +/- Infinity may be replaced by other values depending
                  on the store. For example, the SQLAlchemy store replaces
                  +/- Inf with max / min float values. All backend stores
                  will support values up to length 5000, but some may
                  support larger values.
    :param timestamp: Time when this metric was calculated. Defaults to the
                      current system time (in milliseconds).
    :param step: Training step (iteration) at which was the metric
                 calculated. Defaults to 0.
    """
    if timestamp is None:
        # Timestamps are recorded in milliseconds since the epoch.
        timestamp = int(time.time() * 1000)
    if step is None:
        step = 0
    _validate_metric(key, value, timestamp, step)
    self.store.log_metric(run_id, Metric(key, value, timestamp, step))
def log_metric(self, run_id, metric):
    """Store one metric row for ``run_id``; NaN becomes 0 (flagged) and +/- Inf is clamped."""
    _validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
    is_nan = math.isnan(metric.value)
    if is_nan:
        sql_value = 0
    elif math.isinf(metric.value):
        # NB: Sql can not represent Infs = > We replace +/- Inf with max/min 64b float value
        sql_value = 1.7976931348623157e308 if metric.value > 0 else -1.7976931348623157e308
    else:
        sql_value = metric.value
    with self.ManagedSessionMaker() as session:
        run = self._get_run(run_uuid=run_id, session=session)
        self._check_run_is_active(run)
        # ToDo: Consider prior checks for null, type, metric name validations, ... etc.
        self._get_or_create(
            model=SqlMetric,
            run_uuid=run_id,
            key=metric.key,
            value=sql_value,
            timestamp=metric.timestamp,
            step=metric.step,
            session=session,
            is_nan=is_nan,
        )
def log_metrics(self, run_id, metrics, batch_mode=True):
    """
    Log a collection of metrics for ``run_id`` in one session.

    :param run_id: String ID of the run the metrics belong to.
    :param metrics: Iterable of Metric(key, value, timestamp, step) instances;
                    ``None`` is treated as "nothing to do".
    :param batch_mode: When True, all rows are added to the session and
                       conflicts are only detected at commit time; on an
                       IntegrityError the whole call is retried with
                       batch_mode=False so each metric is inserted via
                       ``_get_or_create`` individually.
    :return: None
    """
    if metrics is None:
        return
    with self.ManagedSessionMaker() as session:
        try:
            run = self._get_run(run_uuid=run_id, session=session)
            self._check_run_is_active(run)
            # metrics are grouped by metric name for last metric calculation
            metrics_per_key = {}
            for metric in metrics:
                _validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
                is_nan = math.isnan(metric.value)
                if is_nan:
                    # NaN is persisted as 0 with the ``is_nan`` flag set.
                    value = 0
                elif math.isinf(metric.value):
                    # NB: Sql can not represent Infs = > We replace +/- Inf
                    # with max/min 64b float value
                    value = (1.7976931348623157e308 if metric.value > 0
                             else -1.7976931348623157e308)
                else:
                    value = metric.value
                # ToDo: Consider prior checks for null, type, metric name validations, ... etc.
                if batch_mode:
                    # In batch mode, all metrics are added to the session.
                    # This is much faster than checking for each metric if a metric
                    # with the same name, value, ts, and step already exists and
                    # adding them otherwise. Conflicts will be checked during saving.
                    # If conflicts are detected during saving, metrics are inserted
                    # one after another using _get_or_create.
                    logged_metric = SqlMetric(
                        run_uuid=run_id,
                        key=metric.key,
                        value=value,
                        timestamp=metric.timestamp,
                        step=metric.step,
                        is_nan=is_nan,
                    )
                    self._save_to_db(session, logged_metric)
                    just_created = True
                else:
                    # All metrics are added one after another with additional check if a
                    # metric with same name, value, ts, and step already exists
                    logged_metric, just_created = self._get_or_create(
                        model=SqlMetric,
                        session=session,
                        run_uuid=run_id,
                        key=metric.key,
                        value=value,
                        timestamp=metric.timestamp,
                        step=metric.step,
                        is_nan=is_nan,
                    )
                if just_created:
                    # collect all metrics that are inserted grouped by metric name
                    # to update the latest metric for each metric
                    if metric.key not in metrics_per_key:
                        metrics_per_key[metric.key] = []
                    metrics_per_key[metric.key].append(logged_metric)
            # Conditionally update the ``latest_metrics`` table if the logged metric was not
            # already present in the ``metrics`` table. If the logged metric was already
            # present, we assume that the ``latest_metrics`` table already accounts for
            # its presence
            for logged_metric_list in metrics_per_key.values():
                # ToDo: move grouping by metric name functionality
                # to _update_latest_metric_if_necessary
                self._update_latest_metric_if_necessary(
                    logged_metrics=logged_metric_list, session=session)
            # Explicitly commit the session in order to catch potential integrity errors
            # if commit fails, a metric is already in the store (same run id, step,
            # timestamp, and value) and we have to store each metric individually
            session.flush()
            session.commit()
        except sqlalchemy.exc.IntegrityError:
            session.rollback()
            # Metric with same value, ts, and step already exists
            # Insert metrics with disabled batch mode one after another
            if batch_mode and len(metrics) > 1:
                self.log_metrics(run_id, metrics, batch_mode=False)