from typing import Dict, Optional, Tuple

import skein

# Note: `mlflow` below refers to the project's mlflow helper module
# (format_key, save_text_to_mlflow), not the bare mlflow package.


def _save_logs_to_mlflow(
    logs: Optional[skein.model.ApplicationLogs],
    containers: Dict[str, Tuple[str, str]],
    n_try: int
):
    if not logs:
        return

    for key, log in logs.items():
        # Name the artifact after the task and its final status when the
        # container key is known, otherwise fall back to the raw key.
        if key in containers:
            task, status = containers[key]
            filename = mlflow.format_key(f"{task}_{status}_{n_try}")
        else:
            filename = mlflow.format_key(f"{key}_{n_try}")
        mlflow.save_text_to_mlflow(log, filename)
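# A minimal call-site sketch for the function above, assuming the caller has a
# skein client at hand. The application id and the container -> (task, status)
# mapping are illustrative assumptions, not values produced by this module.
client = skein.Client()
logs = client.application_logs("application_1234567890123_0001")
containers = {
    "container_e01_1234567890123_0001_01_000002": ("worker_0", "SUCCEEDED"),
}
_save_logs_to_mlflow(logs, containers, n_try=0)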
def log_mlflow(self, n_try: int):
    content = ""
    for metric_name, value in self._asdict().items():
        if isinstance(value, dict):
            for k, v in value.items():
                if v:
                    formatted_key = mlflow.format_key(f"{metric_name}_{k}_{n_try}")
                    content += f"{formatted_key}: {v.total_seconds()} secs\n"
        elif value:
            formatted_key = mlflow.format_key(f"{metric_name}_{n_try}")
            content += f"{formatted_key}: {value.total_seconds()} secs\n"
    mlflow.save_text_to_mlflow(content, "tf_yarn_duration_stats")
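# For orientation: a sketch of the kind of NamedTuple the method above is meant
# to live on. Fields hold either a single timedelta or a dict mapping a task
# name to a timedelta. The class and field names here are assumptions for
# illustration, not the library's actual schema.
from datetime import timedelta
from typing import NamedTuple


class DurationStats(NamedTuple):
    total_training_duration: Optional[timedelta]   # logged as one line
    container_duration: Dict[str, timedelta]        # one line per task
    # log_mlflow (above) would be defined here in the real class.


stats = DurationStats(
    total_training_duration=timedelta(seconds=420),
    container_duration={"worker_0": timedelta(seconds=310)},
)
# stats.log_mlflow(n_try=0) would then write one "<key>: <secs> secs" line per value.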
def log(self):
    for evaluator in self.evaluator_list:
        cur_eval_stats = []
        for key, value in MONITORED_METRICS.items():
            # Metrics are published by the evaluators through the skein
            # key-value store under "<evaluator>/<metric key>".
            stat = self.app.kv.get(f'{evaluator}/{key}', None)
            stat = float(stat.decode()) if stat else None
            if stat is not None and stat != self.last_metrics[evaluator][key]:
                # Only values inside the configured bounds are added to the
                # console summary; every new value is still sent to MLflow.
                if key not in self.log_thresholds or \
                        (self.log_thresholds[key][0] <= stat <= self.log_thresholds[key][1]):
                    cur_eval_stats.append(f'{value}: {stat}')
                self.last_metrics[evaluator][key] = stat
                mlflow.log_metric(mlflow.format_key(f"{evaluator}_{key}_{self.n_try}"), stat)
        if len(cur_eval_stats) > 0:
            logger.info(f'Statistics for {evaluator}: {" ".join(cur_eval_stats)}')
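# Illustrative shapes for the structures the method above reads: the
# module-level MONITORED_METRICS mapping and a log_thresholds dict the logger
# instance could be constructed with. The actual metric keys, display names
# and bounds in the library may differ.
MONITORED_METRICS = {
    # kv-store key suffix -> human-readable name used in the console summary
    "eval_step_mean_duration": "Mean evaluation step duration (secs)",
}

# Optional per-metric (lower, upper) bounds: out-of-range values are still
# sent to MLflow but are excluded from the console summary.
log_thresholds = {
    "eval_step_mean_duration": (0.0, 60.0),
}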