def test_creation_and_hydration(self):
    key = random_str()
    value = 10000
    ts = int(time.time())
    metric = Metric(key, value, ts)
    self._check(metric, key, value, ts)

    as_dict = {"key": key, "value": value, "timestamp": ts}
    self.assertEqual(dict(metric), as_dict)

    proto = metric.to_proto()
    metric2 = metric.from_proto(proto)
    self._check(metric2, key, value, ts)

    metric3 = Metric.from_dictionary(as_dict)
    self._check(metric3, key, value, ts)
def _get_metric_from_file(parent_path, metric_name):
    metric_data = read_file(parent_path, metric_name)
    if len(metric_data) == 0:
        raise Exception("Metric '%s' is malformed. No data found." % metric_name)
    last_line = metric_data[-1]
    timestamp, val = last_line.strip().split(" ")
    return Metric(metric_name, float(val), int(timestamp))
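# A minimal, illustrative sketch (not MLflow source) of the on-disk format that
# _get_metric_from_file above assumes: one "<timestamp> <value>" pair per line, with the
# most recent entry last. The metric name "rmse" and the values are hypothetical, and the
# three-argument Metric(key, value, timestamp) constructor matches the older form used
# throughout this section (newer MLflow versions also require a step).
import os
import tempfile

from mlflow.entities import Metric

metrics_dir = tempfile.mkdtemp()
with open(os.path.join(metrics_dir, "rmse"), "w") as f:
    f.write("1535738400 0.83\n")
    f.write("1535738460 0.79\n")

# Parse the last line the same way the reader above does.
with open(os.path.join(metrics_dir, "rmse")) as f:
    timestamp, val = f.readlines()[-1].strip().split(" ")
latest = Metric("rmse", float(val), int(timestamp))
assert latest.value == 0.79 and latest.timestamp == 1535738460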
def _log_metric():
    request_message = _get_request_message(LogMetric())
    metric = Metric(request_message.key, request_message.value, request_message.timestamp)
    _get_store().log_metric(request_message.run_uuid, metric)
    response_message = LogMetric.Response()
    response = Response(mimetype='application/json')
    response.set_data(_message_to_json(response_message))
    return response
def test_weird_metric_names(self):
    WEIRD_METRIC_NAME = "this is/a weird/but valid metric"
    fs = FileStore(self.test_root)
    run_uuid = self.exp_data[0]["runs"][0]
    fs.log_metric(run_uuid, Metric(WEIRD_METRIC_NAME, 10, 1234))
    metric = fs.get_metric(run_uuid, WEIRD_METRIC_NAME)
    assert metric.key == WEIRD_METRIC_NAME
    assert metric.value == 10
    assert metric.timestamp == 1234
def from_proto(cls, proto):
    run_data = cls()
    # iterate proto and add metrics and params
    for proto_metric in proto.metrics:
        run_data._add_metric(Metric.from_proto(proto_metric))
    for proto_param in proto.params:
        run_data._add_param(Param.from_proto(proto_param))
    return run_data
def from_proto(cls, proto):
    run_data = cls()
    # iterate proto and add metrics, params, and tags
    for proto_metric in proto.metrics:
        run_data._add_metric(Metric.from_proto(proto_metric))
    for proto_param in proto.params:
        run_data._add_param(Param.from_proto(proto_param))
    for proto_tag in proto.tags:
        run_data._add_tag(RunTag.from_proto(proto_tag))
    return run_data
def _log_metric():
    request_message = _get_request_message(LogMetric())
    metric = Metric(request_message.key, request_message.value, request_message.timestamp)
    _get_store().log_metric(request_message.run_uuid, metric)
    response_message = LogMetric.Response()
    response = Response(mimetype='application/json')
    response.set_data(
        MessageToJson(response_message, preserving_proto_field_name=True))
    return response
def report_now(self, registry=None, timestamp=None):
    registry = registry or self.registry
    timestamp = timestamp or int(round(self.clock.time()))
    active_run = self.active_run or mlflow.active_run()
    metrics = registry.dump_metrics()
    for mkey, mdict in metrics.items():
        for mname, mvalue in mdict.items():
            active_run.log_metric(Metric(f"{mkey}_{mname}", mvalue, timestamp))
def get_metric_history(self, run_uuid, metric_key):
    parent_path, metric_files = self._get_run_files(run_uuid, "metric")
    if metric_key not in metric_files:
        raise Exception("Metric '%s' not found under run '%s'" % (metric_key, run_uuid))
    metric_data = read_file(parent_path, metric_key)
    rsl = []
    for pair in metric_data:
        ts, val = pair.strip().split(" ")
        rsl.append(Metric(metric_key, float(val), int(ts)))
    return rsl
def log_metric(key, value):
    """
    Logs the passed-in metric under the current run, creating a run if necessary.

    :param key: Metric name (string)
    :param value: Metric value (float)
    """
    if not isinstance(value, numbers.Number):
        print("WARNING: The metric {}={} was not logged because the value is not a number.".format(
            key, value), file=sys.stderr)
        return
    _get_or_start_run().log_metric(Metric(key, value, int(time.time())))
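# A hedged usage sketch of the fluent log_metric API defined above: the metric names and
# values here are made up, and the run is started explicitly rather than relying on the
# implicit run creation mentioned in the docstring.
import mlflow

with mlflow.start_run():
    mlflow.log_metric("rmse", 0.25)
    mlflow.log_metric("accuracy", 0.91)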
def get_metric_history(self, run_uuid, metric_key):
    """
    Returns all logged values for a given metric.

    :param run_uuid: Unique identifier for run
    :param metric_key: Metric name within the run

    :return: A list of float values logged for the given metric, or an empty list if none
        have been logged
    """
    req_body = _message_to_json(GetMetricHistory(run_uuid=run_uuid, metric_key=metric_key))
    response_proto = self._call_endpoint(GetMetricHistory, req_body)
    return [Metric.from_proto(metric).value for metric in response_proto.metrics]
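# A hedged sketch of reading a metric's history through the tracking client, which is
# backed by store implementations like the two get_metric_history methods above. The run
# id and metric name are placeholders; note that the public client returns Metric
# entities rather than bare float values.
from mlflow.tracking import MlflowClient

client = MlflowClient()
for m in client.get_metric_history("some-run-id", "rmse"):
    print(m.timestamp, m.value)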
def get_metric(self, run_uuid, metric_key):
    """
    Returns the last logged value for a given metric.

    :param run_uuid: Unique identifier for run
    :param metric_key: Metric name within the run

    :return: The most recently logged Metric entity for the given metric key, if logged,
        else None
    """
    req_body = _message_to_json(GetMetric(run_uuid=run_uuid, metric_key=metric_key))
    response_proto = self._call_endpoint(GetMetric, req_body)
    return Metric.from_proto(response_proto.metric)
def _create():
    metrics = [
        Metric(random_str(10), random_int(), int(time.time() + random_int(-1e4, 1e4)))
        for x in range(100)
    ]  # noqa
    params = [
        Param(random_str(10), random_str(random_int(10, 35))) for x in range(10)
    ]  # noqa
    rd = RunData()
    for p in params:
        rd.add_param(p)
    for m in metrics:
        rd.add_metric(m)
    return rd, metrics, params
def _log_metrics(self):
    """
    Helper method to log metrics into the specified run.
    """
    timestamp = int(time.time() * 1000)
    self.client.log_batch(
        self.run_id,
        metrics=[
            Metric(
                key=_gen_log_key(key, self.dataset_name),
                value=value,
                timestamp=timestamp,
                step=0,
            )
            for key, value in self.metrics.items()
        ],
    )
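# A hedged sketch of batch-logging metrics with the tracking client, as _log_metrics
# above does. The experiment id and metric names are hypothetical; unlike the older
# three-argument form elsewhere in this section, this path constructs Metric with an
# explicit step.
import time

from mlflow.entities import Metric
from mlflow.tracking import MlflowClient

client = MlflowClient()
run = client.create_run(experiment_id="0")
now_ms = int(time.time() * 1000)
client.log_batch(
    run.info.run_id,
    metrics=[
        Metric(key="val_loss", value=0.12, timestamp=now_ms, step=0),
        Metric(key="val_accuracy", value=0.94, timestamp=now_ms, step=0),
    ],
)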
def _add_metric(self, metric):
    if isinstance(metric, dict):
        metric = Metric(metric['key'], metric['value'], metric['timestamp'])
    self._metrics.append(metric)