def _process_special_args(self):
    """Handle special command-line arguments before normal startup.

    Scans sys.argv for one-shot modes (--frontend, forge, --version,
    --html-help, --help, --dump-config) and executes them immediately.

    Returns:
        Main.EXIT_SUCCESS / Main.EXIT_FAILURE when a special mode ran
        and the process should terminate with that code, or None when
        no terminating special argument was found and normal startup
        should continue.

    Raises:
        ValueError: if a non-interactive-only option is present while
            running in interactive mode.
    """
    if "--frontend" in sys.argv:
        try:
            self._open_frontend()
        except KeyboardInterrupt:
            return Main.EXIT_FAILURE
        # Recurse to process whatever arguments the frontend produced;
        # presumably _open_frontend() rewrites sys.argv (removing
        # "--frontend"), otherwise this would recurse forever — TODO
        # confirm against _open_frontend's implementation.
        return self._process_special_args()
    if self.interactive:
        # These options all terminate the process, which makes no sense
        # from an interactive session.  NOTE(review): this checks
        # self.argv while the branches below check sys.argv — verify
        # the two stay in sync.
        for opt in "forge", "--version", "--help", "--dump-config":
            if opt in self.argv:
                raise ValueError(
                    "\"%s\" is not supported in interactive mode" % opt)
        return None
    if len(sys.argv) > 1 and sys.argv[1] == "forge":
        # Deferred import: the forge client is only needed for this
        # subcommand.
        from veles.forge.forge_client import __run__ as forge_run
        # Drop "forge" so the forge client sees its own action as
        # argv[1]; remember the action name for error reporting.
        del sys.argv[1]
        action = sys.argv[1]
        try:
            forge_run()
            return Main.EXIT_SUCCESS
        except Exception as e:
            # NOTE(review): SystemExit derives from BaseException, not
            # Exception, so this isinstance check looks unreachable
            # here — confirm whether forge_run wraps SystemExit.
            if isinstance(e, SystemExit):
                raise from_none(e)
            self.exception("Failed to run forge %s", action)
            return Main.EXIT_FAILURE
    if "--version" in sys.argv:
        self._print_version()
        return Main.EXIT_SUCCESS
    if "--html-help" in sys.argv:
        veles.__html__()
        return Main.EXIT_SUCCESS
    if "--help" in sys.argv:
        # help text requires UTF-8, but the default codec is ascii over ssh
        # Deliberate fall-through: only prepares the streams here; the
        # actual help output is produced later by the argument parser.
        Logger.ensure_utf8_streams()
    if "--dump-config" in sys.argv:
        self.info("Scanning for the plugins...")
        self.debug("Loaded plugins: %s", veles.__plugins__)
        # Print the effective configuration tree to stdout.
        root.print_()
        return Main.EXIT_SUCCESS
    return None
def gather_info(self):
    """Collect a snapshot of the run's results and environment.

    Builds and returns a dict describing the workflow (name, graph,
    plots, configuration dump), the process (python version, pid,
    log id, seeds), the optional workflow image from manifest.json,
    the elapsed wall-clock time, loader statistics (when a loader
    unit is present) and the metric values reported by all
    registered result providers.
    """
    self.info("Gathering the results...")
    run_times_by_class = dict(self.workflow.get_unit_run_time_stats())
    run_times_by_name = dict(
        self.workflow.get_unit_run_time_stats(by_name=True))
    python_id = "%s %s" % (platform.python_implementation(),
                           platform.python_version())
    info = {}
    info["plots"] = self._gather_plots() if self._include_plots else {}
    info["workflow_graph"] = self.workflow_graphs
    info["name"] = self.workflow.name
    info["description"] = self.workflow.__doc__
    info["id"] = self.launcher.id
    info["loader"] = self.loader_unit
    info["python"] = python_id
    info["pid"] = os.getpid()
    info["logid"] = self.launcher.log_id
    info["seeds"] = self.launcher.seeds
    info["config_root"] = root
    info["workflow_file"] = self.launcher.workflow_file
    info["config_file"] = self.launcher.config_file
    info["unit_run_times_by_class"] = run_times_by_class
    info["unit_run_times_by_name"] = run_times_by_name
    # Render the effective configuration tree to text.
    cfg_buffer = StringIO()
    root.print_(file=cfg_buffer)
    info["config_text"] = cfg_buffer.getvalue()
    # Attach the workflow's image referenced by manifest.json, when
    # both the manifest and the image file are readable.
    info["image"] = None
    wf_dir = os.path.dirname(self.launcher.workflow_file)
    manifest_path = os.path.join(wf_dir, "manifest.json")
    if os.access(manifest_path, os.R_OK):
        with open(manifest_path, "r") as fin:
            manifest = json.load(fin)
        image_path = os.path.join(wf_dir, manifest["image"])
        if os.access(image_path, os.R_OK):
            with open(image_path, "rb") as fin:
                info["image"] = {"name": manifest["image"],
                                 "data": fin.read()}
        else:
            self.warning("Could not read %s", image_path)
    # Elapsed wall-clock time split into days/hours/mins/secs.
    minutes, seconds = divmod(time() - self.launcher.start_time, 60)
    hrs, minutes = divmod(minutes, 60)
    dys, hrs = divmod(hrs, 24)
    info.update({"days": dys, "hours": hrs, "mins": minutes,
                 "secs": seconds})
    unit = self.loader_unit
    if unit is not None:
        if unit.has_labels:
            info["labels"] = tuple(unit.labels_mapping)
            info["label_stats"] = (dict(unit.test_diff_labels),
                                   dict(unit.valid_diff_labels),
                                   dict(unit.train_diff_labels))
        info["class_lengths"] = tuple(unit.class_lengths)
        info["total_samples"] = unit.total_samples
        info["epochs"] = unit.epoch_number
        info["normalization"] = unit.normalization_type
        info["normalization_parameters"] = unit.normalization_parameters
        if hasattr(unit, "target_normalizer"):
            info["target_normalization"] = unit.target_normalization_type
            info["target_normalization_parameters"] = \
                unit.target_normalization_parameters
    # Merge the metric values from every result provider.
    metrics = {}
    info["results"] = metrics
    for provider in self.result_providers:
        metrics.update(provider.get_metric_values())
    return info
def gather_info(self):
    """Assemble the results report for this run.

    Returns a dict with workflow metadata (name, graph, plots,
    configuration dump), runtime environment (python version, pid,
    log id, seeds), the optional workflow image from manifest.json,
    elapsed run time, loader statistics (each guarded by hasattr
    checks) and the metrics gathered from all result providers.
    """
    self.info("Gathering the results...")
    launcher = self.launcher
    workflow = self.workflow
    info = {
        "plots": self._gather_plots() if self._include_plots else {},
        "workflow_graph": self.workflow_graphs,
        "name": workflow.name,
        "description": workflow.__doc__,
        "id": launcher.id,
        "loader": self.loader_unit,
        "python": "%s %s" % (platform.python_implementation(),
                             platform.python_version()),
        "pid": os.getpid(),
        "logid": launcher.log_id,
        "seeds": launcher.seeds,
        "config_root": root,
        "workflow_file": launcher.workflow_file,
        "config_file": launcher.config_file,
        "unit_run_times_by_class": dict(
            workflow.get_unit_run_time_stats()),
        "unit_run_times_by_name": dict(
            workflow.get_unit_run_time_stats(by_name=True))}
    # Dump the effective configuration tree as text.
    stream = StringIO()
    root.print_(file=stream)
    info["config_text"] = stream.getvalue()

    def load_image():
        # Read the image referenced by manifest.json next to the
        # workflow file; None when the manifest or image is unreadable.
        wdir = os.path.dirname(launcher.workflow_file)
        mpath = os.path.join(wdir, "manifest.json")
        if not os.access(mpath, os.R_OK):
            return None
        with open(mpath, "r") as fin:
            manifest = json.load(fin)
        ipath = os.path.join(wdir, manifest["image"])
        if not os.access(ipath, os.R_OK):
            self.warning("Could not read %s", ipath)
            return None
        with open(ipath, "rb") as fin:
            return {"name": manifest["image"], "data": fin.read()}

    info["image"] = load_image()
    # Elapsed wall-clock time split into days/hours/mins/secs.
    elapsed = time() - launcher.start_time
    mins, secs = divmod(elapsed, 60)
    hours, mins = divmod(mins, 60)
    days, hours = divmod(hours, 24)
    info.update({"days": days, "hours": hours, "mins": mins,
                 "secs": secs})
    unit = self.loader_unit
    if unit is not None:
        # Loader attributes are optional, hence the hasattr guards.
        if hasattr(unit, "labels_mapping"):
            info.update({"labels": tuple(unit.labels_mapping)})
        stats_attrs = ("test_diff_labels", "valid_diff_labels",
                       "train_diff_labels")
        if all(hasattr(unit, a) for a in stats_attrs):
            info.update({"label_stats": tuple(
                dict(getattr(unit, a)) for a in stats_attrs)})
        info.update({"class_lengths": tuple(unit.class_lengths),
                     "total_samples": unit.total_samples,
                     "epochs": unit.epoch_number,
                     "normalization": unit.normalization_type,
                     "normalization_parameters":
                         unit.normalization_parameters})
        if hasattr(unit, "target_normalizer"):
            info.update({
                "target_normalization": unit.target_normalization_type,
                "target_normalization_parameters":
                    unit.target_normalization_parameters})
    # Collect metric values from every result provider.
    results = {}
    info["results"] = results
    for prov in self.result_providers:
        results.update(prov.get_metric_values())
    return info