def _print_search_path(self, config_name: Optional[str], overrides: List[str]) -> None:
    """Log the config search path as a two-column (provider, path) table."""
    assert log is not None
    log.debug("")
    self._log_header(header="Config search path", filler="*")

    cfg = self.compose_config(
        config_name=config_name,
        overrides=overrides,
        run_mode=RunMode.RUN,
        with_log_configuration=False,
    )
    HydraConfig.instance().set_config(cfg)
    cfg = self.get_sanitized_cfg(cfg, cfg_type="hydra")
    sources = cfg.hydra.runtime.config_sources

    # Collect every row first so the column widths account for all cells.
    box: List[List[str]] = [["Provider", "Search path"]]
    box.extend([src.provider, f"{src.schema}://{src.path}"] for src in sources)
    provider_pad, search_path_pad = get_column_widths(box)

    header = "| {} | {} |".format(
        "Provider".ljust(provider_pad), "Search path".ljust(search_path_pad)
    )
    self._log_header(header=header, filler="-")
    for src in sources:
        row = "| {} | {} |".format(
            src.provider.ljust(provider_pad),
            f"{src.schema}://{src.path}".ljust(search_path_pad),
        )
        log.debug(row)
    self._log_footer(header=header, filler="-")
def dispatch_job(
    idx: int,
    overrides: Sequence[str],
    config_loader: ConfigLoader,
    config: DictConfig,
    task_function: TaskFunction,
    singleton_state: Dict[Any, Any],
) -> JobReturn:
    """Calls `run_job` in parallel

    Note that Joblib's default backend runs isolated Python processes, see
    https://joblib.readthedocs.io/en/latest/parallel.html#shared-memory-semantics
    """
    setup_globals()
    # Restore the serialized singleton state inside the worker process.
    Singleton.set_state(singleton_state)

    log.info("\t#{} : {}".format(idx, " ".join(filter_overrides(overrides))))
    job_cfg = config_loader.load_sweep_config(config, list(overrides))
    with open_dict(job_cfg):
        job_cfg.hydra.job.id = "{}_{}".format(job_cfg.hydra.job.name, idx)
        job_cfg.hydra.job.num = idx
    HydraConfig.instance().set_config(job_cfg)

    return run_job(
        config=job_cfg,
        task_function=task_function,
        job_dir_key="hydra.sweep.dir",
        job_subdir_key="hydra.sweep.subdir",
    )
def run_maze_job(hydra_overrides: Dict[str, str], config_module: str, config_name: str) -> DictConfig:
    """Runs rollout with the given config overrides using maze_run.

    :param hydra_overrides: Config overrides for hydra.
    :param config_module: The config module.
    :param config_name: The name of the default config.
    """
    override_args = [f"{key}={val}" for key, val in hydra_overrides.items()]
    with initialize_config_module(config_module=config_module):  # config is relative to a module
        # return_hydra_config=True keeps the hydra node in the composed config,
        # which the HydraConfig init below requires.
        cfg = compose(config_name=config_name, overrides=override_args, return_hydra_config=True)

        # Init the HydraConfig: this is when Hydra actually creates the output
        # dir and changes into it (otherwise we only have the config object,
        # not the full run environment).
        HydraConfig.instance().set_config(cfg)

        # The rollout itself must not see the hydra node anymore.
        with open_dict(cfg):
            del cfg["hydra"]

        maze_run(cfg)
    return cfg
def execute_job(
    idx: int,
    overrides: Sequence[str],
    config_loader: ConfigLoader,
    config: DictConfig,
    task_function: TaskFunction,
    singleton_state: Dict[Any, Any],
) -> JobReturn:
    """Calls `run_job` in parallel """
    setup_globals()
    # Re-hydrate the singleton state in the worker process.
    Singleton.set_state(singleton_state)

    job_cfg = config_loader.load_sweep_config(config, list(overrides))
    with open_dict(job_cfg):
        job_cfg.hydra.job.id = "{}_{}".format(job_cfg.hydra.job.name, idx)
        job_cfg.hydra.job.num = idx
    HydraConfig.instance().set_config(job_cfg)

    return run_job(
        config=job_cfg,
        task_function=task_function,
        job_dir_key="hydra.sweep.dir",
        job_subdir_key="hydra.sweep.subdir",
    )
def launch(
    self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
) -> Sequence[JobReturn]:
    """Run every override set sequentially in this process; return their results."""
    setup_globals()
    assert self.config is not None
    assert self.task_function is not None
    assert self.config_loader is not None
    configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
    Path(str(self.config.hydra.sweep.dir)).mkdir(parents=True, exist_ok=True)
    log.info(f"Launching {len(job_overrides)} jobs locally")
    runs: List[JobReturn] = []
    for offset, overrides in enumerate(job_overrides):
        idx = initial_job_idx + offset
        log.info(f"\t#{idx} : {' '.join(filter_overrides(overrides))}")
        sweep_config = self.config_loader.load_sweep_config(
            self.config, list(overrides)
        )
        with open_dict(sweep_config):
            sweep_config.hydra.job.id = idx
            sweep_config.hydra.job.num = idx
        HydraConfig.instance().set_config(sweep_config)
        runs.append(
            run_job(
                config=sweep_config,
                task_function=self.task_function,
                job_dir_key="hydra.sweep.dir",
                job_subdir_key="hydra.sweep.subdir",
            )
        )
    # run_job reconfigured logging for the job; restore Hydra's own logging.
    configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
    return runs
def multirun(
    self,
    config_name: Optional[str],
    task_function: TaskFunction,
    overrides: List[str],
    with_log_configuration: bool = True,
) -> Any:
    """Compose the multirun config and delegate the sweep to the configured sweeper."""
    # Initial config is loaded without strict (individual job configs may have strict).
    multirun_cfg = self.compose_config(
        config_name=config_name,
        overrides=overrides,
        strict=False,
        with_log_configuration=with_log_configuration,
        run_mode=RunMode.MULTIRUN,
    )
    HydraConfig.instance().set_config(multirun_cfg)
    sweeper = Plugins.instance().instantiate_sweeper(
        config=multirun_cfg,
        config_loader=self.config_loader,
        task_function=task_function,
    )
    # Hand the sweeper a plain list; overrides stay unresolved on purpose.
    task_overrides = OmegaConf.to_container(
        multirun_cfg.hydra.overrides.task, resolve=False
    )
    assert isinstance(task_overrides, list)
    return sweeper.sweep(arguments=task_overrides)
def launch(
    self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
) -> Sequence[JobReturn]:
    # Submit one slurm job per override set by writing sbatch/shell files and
    # launching them; jobs run out-of-process via slurm.
    setup_globals()
    assert self.hydra_context is not None
    assert self.config is not None
    assert self.task_function is not None
    configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
    sweep_dir = self.config.hydra.sweep.dir
    Path(str(sweep_dir)).mkdir(parents=True, exist_ok=True)
    log.info("Launching {} jobs on slurm".format(len(job_overrides)))
    # NOTE(review): `runs` is returned empty — nothing is ever appended, so
    # callers receive no per-job results from this launcher.
    runs: List[JobReturn] = []
    for idx, overrides in enumerate(job_overrides):
        idx = initial_job_idx + idx
        lst = " ".join(filter_overrides(overrides))
        log.info(f"\t#{idx} : {lst}")
        sweep_config = self.hydra_context.config_loader.load_sweep_config(
            self.config, list(overrides)
        )
        with open_dict(sweep_config):
            sweep_config.hydra.job.id = idx
            sweep_config.hydra.job.num = idx
        HydraConfig.instance().set_config(sweep_config)
        log.info("\tJob name : {}".format(slurm_utils.resolve_name(sweep_config.slurm.job_name)))
        # Write the sbatch file and launch shell script, then submit the job.
        slurm_utils.write_slurm(sweep_config)
        slurm_utils.write_sh(sweep_config, " ".join(filter_overrides(overrides)))
        slurm_utils.launch_job(sweep_config)
    # Restore Hydra's own logging after the loop.
    configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
    # NOTE(review): `sweep_config` here is the last iteration's config —
    # presumably `wait` is meant as a batch-level flag; confirm intent.
    if sweep_config.wait:
        time.sleep(1)
    return runs
def launch_jobs(temp_dir: str) -> None:
    """Run the pickled sweep jobs on Ray and dump their results to `temp_dir`.

    Loads the job spec pickle, launches one remote Ray task per sweep config,
    waits for all of them, and writes the collected returns back out.
    """
    with open(os.path.join(temp_dir, JOB_SPEC_PICKLE), "rb") as f:
        job_spec = pickle.load(f)  # nosec
    singleton_state = job_spec["singleton_state"]
    sweep_configs = job_spec["sweep_configs"]
    task_function = job_spec["task_function"]

    instance_id = _get_instance_id()
    sweep_dir = None
    runs = []
    for sweep_config in sweep_configs:
        with open_dict(sweep_config):
            sweep_config.hydra.job.id = (
                f"{instance_id}_{sweep_config.hydra.job.num}"
            )
        setup_globals()
        Singleton.set_state(singleton_state)
        HydraConfig.instance().set_config(sweep_config)
        ray_init_cfg = sweep_config.hydra.launcher.ray_init_cfg
        ray_remote_cfg = sweep_config.hydra.launcher.ray_remote_cfg

        # The sweep dir is created once, from the first job's config.
        if not sweep_dir:
            sweep_dir = Path(str(HydraConfig.get().sweep.dir))
            sweep_dir.mkdir(parents=True, exist_ok=True)

        start_ray(ray_init_cfg)
        runs.append(
            launch_job_on_ray(
                ray_remote_cfg, sweep_config, task_function, singleton_state
            )
        )

    # Block until every remote job has finished, preserving submission order.
    _dump_job_return([ray.get(run) for run in runs], temp_dir)
def launch(self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int) -> Sequence[JobReturn]:
    """
    :param job_overrides: a List of List<String>, where each inner list is the arguments for one job run.
    :param initial_job_idx: Initial job idx in batch.
    :return: an array of return values from run_job with indexes corresponding to the input list indexes.
    """
    setup_globals()
    assert self.config is not None
    assert self.config_loader is not None
    assert self.task_function is not None
    configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
    sweep_dir = Path(str(self.config.hydra.sweep.dir))
    sweep_dir.mkdir(parents=True, exist_ok=True)
    log.info(
        f"Example Launcher(foo={self.foo}, bar={self.bar}) is launching {len(job_overrides)} jobs locally"
    )
    log.info(f"Sweep output dir : {sweep_dir}")
    runs = []
    for offset, overrides in enumerate(job_overrides):
        idx = initial_job_idx + offset
        lst = " ".join(filter_overrides(overrides))
        log.info(f"\t#{idx} : {lst}")
        sweep_config = self.config_loader.load_sweep_config(
            self.config, list(overrides)
        )
        with open_dict(sweep_config):
            # The job id typically comes from the underlying scheduler
            # (SLURM_JOB_ID for instance). It is not available here in the
            # main process and should be populated remotely before calling
            # the task_function.
            sweep_config.hydra.job.id = f"job_id_for_{idx}"
            sweep_config.hydra.job.num = idx
        HydraConfig.instance().set_config(sweep_config)
        # A launcher executing code in a different process must serialize the
        # singleton state along with the other parameters and restore it in
        # the child process before running the task.
        state = Singleton.get_state()  # main (launcher) process side
        Singleton.set_state(state)     # spawned process side, before run_job
        runs.append(
            run_job(
                config=sweep_config,
                task_function=self.task_function,
                job_dir_key="hydra.sweep.dir",
                job_subdir_key="hydra.sweep.subdir",
            )
        )
    # run_job configured logging for the job; restore Hydra's logging since
    # this launcher runs jobs in the same process.
    configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
    return runs
def run_job(
    config: DictConfig,
    task_function: TaskFunction,
    job_dir_key: str,
    job_subdir_key: Optional[str],
    configure_logging: bool = True,
) -> "JobReturn":
    """Run `task_function` inside the job's working directory.

    Creates the working dir, chdirs into it, saves the config/hydra/overrides
    YAMLs (when output_subdir is set), and restores the caller's cwd and
    HydraConfig afterwards.

    :param config: full config including the hydra node
    :param task_function: user function executed with the job config
    :param job_dir_key: config key selecting the job's base directory
    :param job_subdir_key: optional config key selecting a per-job subdirectory
    :param configure_logging: when True, apply the job logging config
    :return: a JobReturn carrying the task's return value and sanitized configs
    """
    old_cwd = os.getcwd()
    working_dir = str(OmegaConf.select(config, job_dir_key))
    # Remember the current HydraConfig so it can be restored in `finally`.
    orig_hydra_cfg = HydraConfig.instance().cfg
    if job_subdir_key is not None:
        # evaluate job_subdir_key lazily.
        # this is running on the client side in sweep and contains things such as job:id which
        # are only available there.
        subdir = str(OmegaConf.select(config, job_subdir_key))
        working_dir = os.path.join(working_dir, subdir)
    try:
        ret = JobReturn()
        ret.working_dir = working_dir
        task_cfg = copy.deepcopy(config)
        hydra_cfg = OmegaConf.masked_copy(task_cfg, "hydra")
        # maintain parent to preserve interpolation links from hydra_cfg to job_cfg
        hydra_cfg._set_parent(task_cfg)
        with read_write(task_cfg):
            with open_dict(task_cfg):
                del task_cfg["hydra"]
        HydraConfig.instance().cfg = hydra_cfg  # type: ignore
        ret.cfg = task_cfg
        ret.hydra_cfg = hydra_cfg
        overrides = OmegaConf.to_container(config.hydra.overrides.task)
        assert isinstance(overrides, list)
        ret.overrides = overrides
        # handle output directories here
        Path(str(working_dir)).mkdir(parents=True, exist_ok=True)
        os.chdir(working_dir)
        if configure_logging:
            configure_log(config.hydra.job_logging, config.hydra.verbose)
        if config.hydra.output_subdir is not None:
            hydra_output = Path(config.hydra.output_subdir)
            _save_config(task_cfg, "config.yaml", hydra_output)
            _save_config(hydra_cfg, "hydra.yaml", hydra_output)
            _save_config(config.hydra.overrides.task, "overrides.yaml", hydra_output)
        # Apply any job-scoped environment variables while the task runs.
        with env_override(hydra_cfg.hydra.job.env_set):
            ret.return_value = task_function(task_cfg)
        ret.task_name = JobRuntime.instance().get("name")
        _flush_loggers()
        return ret
    finally:
        # Always restore the caller's HydraConfig and working directory.
        HydraConfig.instance().cfg = orig_hydra_cfg
        os.chdir(old_cwd)
def test_foo(restore_singletons: Any) -> Any:
    """set_config should expose hydra values to the job config via interpolation."""
    utils.setup_globals()
    search_path = create_config_search_path("pkg://hydra.test_utils.configs")
    config_loader = ConfigLoaderImpl(config_search_path=search_path)
    cfg = config_loader.load_configuration(
        config_name="accessing_hydra_config", overrides=[]
    )
    HydraConfig.instance().set_config(cfg)
    with open_dict(cfg):
        del cfg["hydra"]
    # Interpolations into the hydra node still resolve after its removal.
    assert cfg.job_name == "UNKNOWN_NAME"
    assert cfg.config_name == "accessing_hydra_config"
def _run_job(
    sweep_config: DictConfig,
    task_function: TaskFunction,
    singleton_state: Dict[Any, Any],
) -> JobReturn:
    """Restore the Hydra environment in this process, then run one sweep job."""
    setup_globals()
    Singleton.set_state(singleton_state)
    HydraConfig.instance().set_config(sweep_config)
    job_result = run_job(
        config=sweep_config,
        task_function=task_function,
        job_dir_key="hydra.sweep.dir",
        job_subdir_key="hydra.sweep.subdir",
    )
    return job_result
def launch(self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int) -> Sequence[JobReturn]:
    """
    :param job_overrides: a List of List<String>, where each inner list is the arguments for one job run.
    :param initial_job_idx: Initial job idx in batch.
    :return: an array of return values from run_job with indexes corresponding to the input list indexes.
    """
    setup_globals()
    assert self.config is not None
    assert self.config_loader is not None
    assert self.task_function is not None
    configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
    sweep_dir = Path(str(self.config.hydra.sweep.dir))
    sweep_dir.mkdir(parents=True, exist_ok=True)
    log.info(
        "Example Launcher(foo={}, bar={}) is launching {} jobs locally".format(
            self.foo, self.bar, len(job_overrides)
        )
    )
    log.info("Sweep output dir : {}".format(sweep_dir))
    runs = []
    for offset, overrides in enumerate(job_overrides):
        idx = initial_job_idx + offset
        log.info("\t#{} : {}".format(idx, " ".join(filter_overrides(overrides))))
        sweep_config = self.config_loader.load_sweep_config(
            self.config, list(overrides)
        )
        with open_dict(sweep_config):
            # The real job id typically comes from the underlying scheduler
            # (SLURM_JOB_ID for instance). It is not available here in the
            # main process and should be populated remotely before calling
            # the task_function.
            sweep_config.hydra.job.id = "job_id_for_{}".format(idx)
            sweep_config.hydra.job.num = idx
        HydraConfig.instance().set_config(sweep_config)
        runs.append(
            run_job(
                config=sweep_config,
                task_function=self.task_function,
                job_dir_key="hydra.sweep.dir",
                job_subdir_key="hydra.sweep.subdir",
            )
        )
    # run_job configured logging for the job; restore Hydra's logging since
    # jobs run in this same process.
    configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
    return runs
def test_configuration_set_via_cmd_and_default_config(
    sweep_runner: TSweepRunner,  # noqa: F811
) -> None:
    """Command-line ax overrides should win over the default config values."""
    sweep = sweep_runner(
        calling_file=os.path.dirname(os.path.abspath(__file__)),
        calling_module=None,
        task_function=quadratic,
        config_path="tests/config",
        config_name="default_quadratic.yaml",
        overrides=[
            "hydra/sweeper=ax",
            "hydra/launcher=basic",
            "hydra.sweeper.params.ax_config.client.random_seed=1",
            "hydra.sweeper.params.ax_config.max_trials=2",
            "hydra.sweeper.params.ax_config.early_stop.max_epochs_without_improvement=2",
            "quadratic.x=-5:-2",
            "quadratic.y=-1:1",
        ],
    )
    with sweep:
        ax_config = HydraConfig.instance().hydra.sweeper.params.ax_config
        # Values overridden on the command line above.
        assert ax_config.max_trials == 2
        assert ax_config.early_stop.max_epochs_without_improvement == 2
        # Value coming from the default config.
        assert ax_config.experiment.minimize is True
        assert sweep.returns is None
        results = OmegaConf.load(f"{sweep.temp_dir}/optimization_results.yaml")
        assert isinstance(results, DictConfig)
        best = results["ax"]
        assert "quadratic.x" in best
        assert "quadratic.y" in best
def run(
    self,
    config_name: Optional[str],
    task_function: TaskFunction,
    overrides: List[str],
) -> JobReturn:
    """Compose the config for a single run and execute the task function."""
    cfg = self.compose_config(
        config_name=config_name,
        overrides=overrides,
        with_log_configuration=True,
    )
    HydraConfig.instance().set_config(cfg)
    # A single run has no per-job subdirectory.
    return run_job(
        config=cfg,
        task_function=task_function,
        job_dir_key="hydra.run.dir",
        job_subdir_key=None,
    )
def _print_config_info(self, config_name: Optional[str], overrides: List[str]) -> None:
    """Print the search path, defaults tree, defaults list and final job config."""
    assert log is not None
    self._print_search_path(config_name=config_name, overrides=overrides)
    self._print_defaults_tree(config_name=config_name, overrides=overrides)
    self._print_defaults_list(config_name=config_name, overrides=overrides)

    def _compose():
        return self.compose_config(
            config_name=config_name,
            overrides=overrides,
            run_mode=RunMode.RUN,
            with_log_configuration=False,
        )

    cfg = run_and_report(_compose)
    HydraConfig.instance().set_config(cfg)
    self._log_header(header="Config", filler="*")
    # Drop the hydra node so only the job config is printed.
    with flag_override(cfg, ["struct", "readonly"], [False, False]):
        del cfg["hydra"]
    log.info(OmegaConf.to_yaml(cfg))
def app_help(self, config_name: Optional[str], args_parser: ArgumentParser, args: Any) -> None:
    """Render and print the application's help text."""
    cfg = self.compose_config(
        config_name=config_name,
        overrides=args.overrides,
        run_mode=RunMode.RUN,
        with_log_configuration=True,
    )
    HydraConfig.instance().set_config(cfg)
    help_cfg = cfg.hydra.help
    # Show only the sanitized job config in the help output; work on a copy
    # so the composed config is left untouched.
    clean_cfg = self.get_sanitized_cfg(copy.deepcopy(cfg), "job")
    help_text = self.get_help(help_cfg, clean_cfg, args_parser, resolve=args.resolve)
    print(help_text)
def execute_job(
    idx: int,
    overrides: Sequence[str],
    config_loader: ConfigLoader,
    config: DictConfig,
    task_function: TaskFunction,
    singleton_state: Dict[Any, Any],
    cmd_prefix: str,
    tsp_prefix: str,
) -> JobReturn:
    """Calls `run_job` in parallel """
    setup_globals()
    Singleton.set_state(singleton_state)
    lst = " ".join(overrides)
    sweep_config = config_loader.load_sweep_config(config, list(overrides))
    with open_dict(sweep_config):
        sweep_config.hydra.job.id = "{}_{}".format(sweep_config.hydra.job.name, idx)
        sweep_config.hydra.job.num = idx
    HydraConfig.instance().set_config(sweep_config)

    def tsp_task_function(task_cfg):
        # Runs inside the job's working dir (run_job chdirs first). Builds a
        # shell command that re-runs the app from the original cwd with this
        # job's overrides, pointing hydra.run.dir at the working dir, and
        # submits it to Task Spooler. Returns the spooler's job id.
        working_dir = os.getcwd()
        cmd = f"{cmd_prefix} {lst}"
        log.info(f"\t#{idx} : {lst}")
        cmd = f"cd {hydra.utils.get_original_cwd()} && {cmd} hydra.run.dir={working_dir}"
        # NOTE(review): shell=True with interpolated paths/overrides — assumes
        # trusted input; confirm overrides cannot contain shell metacharacters.
        job_id = int(subprocess.check_output(cmd, shell=True).rstrip())
        log.info(
            f"Submitted {idx} to TaskSpooler. View logs: {tsp_prefix} -t {job_id}"
        )
        return job_id

    ret = run_job(
        config=sweep_config,
        task_function=tsp_task_function,
        job_dir_key="hydra.sweep.dir",
        job_subdir_key="hydra.sweep.subdir",
    )
    # The "return value" of a spooled job is its Task Spooler id.
    ret.id = ret.return_value
    return ret
def multirun(
    self,
    config_name: Optional[str],
    task_function: TaskFunction,
    overrides: List[str],
) -> Any:
    """Compose the multirun config and delegate execution to the sweeper.

    :param config_name: name of the config to compose (may be None)
    :param task_function: the user task function each sweep job will run
    :param overrides: command-line overrides for the composed config
    :return: whatever the instantiated sweeper's sweep() returns
    """
    # Initial config is loaded without strict (individual job configs may have strict).
    cfg = self.compose_config(
        config_name=config_name,
        overrides=overrides,
        strict=False,
        with_log_configuration=True,
    )
    HydraConfig.instance().set_config(cfg)
    sweeper = Plugins.instance().instantiate_sweeper(
        config=cfg, config_loader=self.config_loader, task_function=task_function
    )
    # Convert to a plain Python list instead of handing the sweeper a live
    # ListConfig node, matching the sibling multirun implementation.
    # resolve=False on purpose: interpolations are resolved per-job later.
    task_overrides = OmegaConf.to_container(cfg.hydra.overrides.task, resolve=False)
    assert isinstance(task_overrides, list)
    return sweeper.sweep(arguments=task_overrides)
def run_job(
    config: DictConfig,
    task_function: TaskFunction,
    job_dir_key: str,
    job_subdir_key: Optional[str],
) -> "JobReturn":
    """Execute `task_function` inside the job's working directory.

    Creates the working dir, chdirs into it, saves the config/hydra/overrides
    YAMLs, shuts logging down on success, and restores the caller's cwd.

    :param config: full config including the hydra node
    :param task_function: user function executed with the job config
    :param job_dir_key: config key selecting the job's base directory
    :param job_subdir_key: optional config key selecting a per-job subdirectory
    """
    old_cwd = os.getcwd()
    # NOTE(review): config.select() is a legacy OmegaConf API — newer code in
    # this codebase uses OmegaConf.select(config, key).
    working_dir = str(config.select(job_dir_key))
    if job_subdir_key is not None:
        # evaluate job_subdir_key lazily.
        # this is running on the client side in sweep and contains things such as job:id which
        # are only available there.
        subdir = str(config.select(job_subdir_key))
        working_dir = os.path.join(working_dir, subdir)
    try:
        ret = JobReturn()
        ret.working_dir = working_dir
        task_cfg = copy.deepcopy(config)
        del task_cfg["hydra"]
        ret.cfg = task_cfg
        ret.hydra_cfg = OmegaConf.create(
            {"hydra": HydraConfig.instance().hydra})
        overrides = OmegaConf.to_container(config.hydra.overrides.task)
        assert isinstance(overrides, list)
        ret.overrides = overrides
        # handle output directories here
        Path(str(working_dir)).mkdir(parents=True, exist_ok=True)
        os.chdir(working_dir)
        hydra_output = Path(config.hydra.output_subdir)
        configure_log(config.hydra.job_logging, config.hydra.verbose)
        hydra_cfg = OmegaConf.masked_copy(config, "hydra")
        assert isinstance(hydra_cfg, DictConfig)
        _save_config(task_cfg, "config.yaml", hydra_output)
        _save_config(hydra_cfg, "hydra.yaml", hydra_output)
        _save_config(config.hydra.overrides.task, "overrides.yaml", hydra_output)
        ret.return_value = task_function(task_cfg)
        ret.task_name = JobRuntime.instance().get("name")
        # shut down logging to ensure job log files are closed.
        # If logging is still required after run_job caller is responsible to re-initialize it.
        logging.shutdown()
        return ret
    finally:
        # Always restore the caller's working directory.
        os.chdir(old_cwd)
def show_cfg(
    self,
    config_name: Optional[str],
    overrides: List[str],
    cfg_type: str,
    package: Optional[str],
    resolve: bool = False,
) -> None:
    """Print the requested (sub-)config to stdout, optionally resolved."""
    cfg = self.compose_config(
        config_name=config_name,
        overrides=overrides,
        run_mode=RunMode.RUN,
        with_log_configuration=False,
    )
    HydraConfig.instance().set_config(cfg)
    OmegaConf.set_readonly(cfg.hydra, None)
    cfg = self.get_sanitized_cfg(cfg, cfg_type)

    # "_global_" means the whole config, same as no package at all.
    if package == "_global_":
        package = None

    selected = cfg if package is None else OmegaConf.select(cfg, package)
    if package is not None and selected is None:
        sys.stderr.write(f"package '{package}' not found in config\n")
        sys.exit(1)

    if not isinstance(selected, Container):
        # A leaf value: print it directly rather than as YAML.
        print(selected)
        return

    if package is not None:
        print(f"# @package {package}")
    if resolve:
        OmegaConf.resolve(selected)
    sys.stdout.write(OmegaConf.to_yaml(selected))
def maze_run(cfg: DictConfig) -> Optional[float]:
    """ Run a CLI task based on the provided configuration.

    A runner object is instantiated according to the config (cfg.runner) and it is then handed the
    whole configuration object (cfg). Runners can perform various tasks such as rollouts, trainings etc.

    :param cfg: Hydra configuration for the rollout.
    :return: In case of a multirun it returns the maximum mean reward (required for hyper parameter
             optimization). For regular runs nothing is returned.
    """
    # A --multirun is detected by hydra.job.num being set in the HydraConfig.
    hydra_cfg = HydraConfig.instance().cfg
    is_multi_run = hydra_cfg is not None and hydra_cfg.hydra.job.get("num") is not None

    if is_multi_run:
        # multirun (e.g., grid search, nevergrad, ...)
        return _run_multirun_job(cfg)

    # regular single run — nothing to return
    _run_job(cfg)
def get_original_cwd() -> str:
    """Return the working directory Hydra was launched from (hydra.runtime.cwd)."""
    original_cwd = HydraConfig.instance().hydra.runtime.cwd
    assert original_cwd is not None and isinstance(original_cwd, str)
    return original_cwd
def run_job(
    task_function: TaskFunction,
    config: DictConfig,
    job_dir_key: str,
    job_subdir_key: Optional[str],
    configure_logging: bool = True,
    hydra_context: Optional[HydraContext] = None,
) -> "JobReturn":
    """Run `task_function` for one job, honoring hydra.job.chdir.

    Records the job's output dir in hydra.runtime, optionally chdirs into it
    (with a deprecation warning when chdir is unset), saves the config YAMLs,
    fires the job callbacks, and restores the caller's HydraConfig and cwd.

    :param task_function: user function executed with the job config
    :param config: full config including the hydra node
    :param job_dir_key: config key selecting the job's base directory
    :param job_subdir_key: optional config key selecting a per-job subdirectory
    :param configure_logging: when True, apply the job logging config
    :param hydra_context: optional context used to resolve job callbacks
    :return: a JobReturn with status, return value and sanitized configs
    """
    callbacks = _get_callbacks_for_run_job(hydra_context)

    old_cwd = os.getcwd()
    orig_hydra_cfg = HydraConfig.instance().cfg

    output_dir = str(OmegaConf.select(config, job_dir_key))
    if job_subdir_key is not None:
        # evaluate job_subdir_key lazily.
        # this is running on the client side in sweep and contains things such as job:id which
        # are only available there.
        subdir = str(OmegaConf.select(config, job_subdir_key))
        output_dir = os.path.join(output_dir, subdir)

    # Publish the absolute output dir in hydra.runtime before setting config.
    with read_write(config.hydra.runtime):
        with open_dict(config.hydra.runtime):
            config.hydra.runtime.output_dir = os.path.abspath(output_dir)

    HydraConfig.instance().set_config(config)
    _chdir = None
    try:
        ret = JobReturn()
        task_cfg = copy.deepcopy(config)
        with read_write(task_cfg):
            with open_dict(task_cfg):
                del task_cfg["hydra"]

        ret.cfg = task_cfg
        hydra_cfg = copy.deepcopy(HydraConfig.instance().cfg)
        assert isinstance(hydra_cfg, DictConfig)
        ret.hydra_cfg = hydra_cfg
        overrides = OmegaConf.to_container(config.hydra.overrides.task)
        assert isinstance(overrides, list)
        ret.overrides = overrides
        # handle output directories here
        Path(str(output_dir)).mkdir(parents=True, exist_ok=True)

        _chdir = hydra_cfg.hydra.job.chdir
        if _chdir is None:
            # chdir not configured: warn about the upcoming default change and
            # keep the legacy behavior (chdir into the output dir).
            url = "https://hydra.cc/docs/upgrades/1.1_to_1.2/changes_to_job_working_dir"
            deprecation_warning(
                message=dedent(
                    f"""\
                    Hydra 1.3 will no longer change working directory at job runtime by default.
                    See {url} for more information."""
                ),
                stacklevel=2,
            )
            _chdir = True
        if _chdir:
            os.chdir(output_dir)
            ret.working_dir = output_dir
        else:
            ret.working_dir = os.getcwd()

        if configure_logging:
            configure_log(config.hydra.job_logging, config.hydra.verbose)

        if config.hydra.output_subdir is not None:
            hydra_output = Path(config.hydra.runtime.output_dir) / Path(
                config.hydra.output_subdir
            )
            _save_config(task_cfg, "config.yaml", hydra_output)
            _save_config(hydra_cfg, "hydra.yaml", hydra_output)
            _save_config(config.hydra.overrides.task, "overrides.yaml", hydra_output)

        # Task errors are captured into the JobReturn rather than propagated.
        with env_override(hydra_cfg.hydra.job.env_set):
            callbacks.on_job_start(config=config)
            try:
                ret.return_value = task_function(task_cfg)
                ret.status = JobStatus.COMPLETED
            except Exception as e:
                ret.return_value = e
                ret.status = JobStatus.FAILED

        ret.task_name = JobRuntime.instance().get("name")
        _flush_loggers()
        callbacks.on_job_end(config=config, job_return=ret)
        return ret
    finally:
        # Always restore the caller's HydraConfig; restore cwd only if changed.
        HydraConfig.instance().cfg = orig_hydra_cfg
        if _chdir:
            os.chdir(old_cwd)
def my_app(_: DictConfig) -> None:
    """Print the job's runtime output directory."""
    output_dir = HydraConfig.instance().get().runtime.output_dir
    print(output_dir)
def run_job(
    task_function: TaskFunction,
    config: DictConfig,
    job_dir_key: str,
    job_subdir_key: Optional[str],
    configure_logging: bool = True,
    hydra_context: Optional[HydraContext] = None,
) -> "JobReturn":
    """Run `task_function` for one job inside its working directory.

    Creates the working dir, chdirs into it, saves the config YAMLs, fires
    the job callbacks, captures task exceptions into the JobReturn status,
    and restores the caller's HydraConfig and cwd afterwards.

    :param task_function: user function executed with the job config
    :param config: full config including the hydra node
    :param job_dir_key: config key selecting the job's base directory
    :param job_subdir_key: optional config key selecting a per-job subdirectory
    :param configure_logging: when True, apply the job logging config
    :param hydra_context: optional context used to resolve job callbacks
    :return: a JobReturn with status, return value and sanitized configs
    """
    callbacks = _get_callbacks_for_run_job(hydra_context)

    old_cwd = os.getcwd()
    orig_hydra_cfg = HydraConfig.instance().cfg
    HydraConfig.instance().set_config(config)
    working_dir = str(OmegaConf.select(config, job_dir_key))
    if job_subdir_key is not None:
        # evaluate job_subdir_key lazily.
        # this is running on the client side in sweep and contains things such as job:id which
        # are only available there.
        subdir = str(OmegaConf.select(config, job_subdir_key))
        working_dir = os.path.join(working_dir, subdir)
    try:
        ret = JobReturn()
        ret.working_dir = working_dir
        task_cfg = copy.deepcopy(config)
        with read_write(task_cfg):
            with open_dict(task_cfg):
                del task_cfg["hydra"]
        ret.cfg = task_cfg
        hydra_cfg = copy.deepcopy(HydraConfig.instance().cfg)
        assert isinstance(hydra_cfg, DictConfig)
        ret.hydra_cfg = hydra_cfg
        overrides = OmegaConf.to_container(config.hydra.overrides.task)
        assert isinstance(overrides, list)
        ret.overrides = overrides
        # handle output directories here
        Path(str(working_dir)).mkdir(parents=True, exist_ok=True)
        os.chdir(working_dir)

        if configure_logging:
            configure_log(config.hydra.job_logging, config.hydra.verbose)

        if config.hydra.output_subdir is not None:
            hydra_output = Path(config.hydra.output_subdir)
            _save_config(task_cfg, "config.yaml", hydra_output)
            _save_config(hydra_cfg, "hydra.yaml", hydra_output)
            _save_config(config.hydra.overrides.task, "overrides.yaml", hydra_output)

        # Task errors are captured into the JobReturn rather than propagated.
        with env_override(hydra_cfg.hydra.job.env_set):
            callbacks.on_job_start(config=config)
            try:
                ret.return_value = task_function(task_cfg)
                ret.status = JobStatus.COMPLETED
            except Exception as e:
                ret.return_value = e
                ret.status = JobStatus.FAILED

        ret.task_name = JobRuntime.instance().get("name")
        _flush_loggers()
        callbacks.on_job_end(config=config, job_return=ret)
        return ret
    finally:
        # Always restore the caller's HydraConfig and working directory.
        HydraConfig.instance().cfg = orig_hydra_cfg
        os.chdir(old_cwd)
def test_get_original_cwd(hydra_restore_singletons: Any) -> None:
    """get_original_cwd should echo the cwd recorded in hydra.runtime."""
    expected = "/foo/AClass"
    cfg = OmegaConf.create({"hydra": HydraConf(runtime=RuntimeConf(cwd=expected))})
    assert isinstance(cfg, DictConfig)
    HydraConfig.instance().set_config(cfg)
    assert utils.get_original_cwd() == expected
def experiment(_cfg: DictConfig) -> None:
    """Print the current Hydra job name."""
    job_name = HydraConfig.instance().hydra.job.name
    print(job_name)