Example #1
0
def finetune(config_file: Path = None, gpu: int = None):
    """Continue training from a pre-trained model.

    Parameters
    ----------
    config_file : Path, optional
        Additional config file whose arguments override the original run config.
        For finetuning it must contain the argument `base_run_dir`, pointing to
        the folder of the pre-trained model.
    gpu : int, optional
        GPU id to use. Will override config argument 'device'.

    """
    # The finetune config only tells us where the base run lives; the effective
    # config is the base run's config.yml with the finetune arguments applied on top.
    finetune_cfg = Config(config_file)
    config = Config(finetune_cfg.base_run_dir / "config.yml")

    # reset run-specific fields, then overlay every argument from the finetune file
    config.force_update({'run_dir': None, 'experiment_name': None})
    config.update_config(config_file)
    config.is_finetuning = True

    # a GPU passed on the command line takes precedence over the config file
    if gpu is not None:
        config.device = f"cuda:{gpu}"

    start_training(config)
Example #2
0
    def __init__(self, cfg: Config):
        """Set up run logging state and persist provenance info into the config.

        Parameters
        ----------
        cfg : Config
            Run configuration; provides `log_interval`, `run_dir` and `img_log_dir`,
            and is updated in place with commit hash and package version.
        """
        self._train = True
        self.log_interval = cfg.log_interval
        self.log_dir = cfg.run_dir
        self._img_log_dir = cfg.img_log_dir

        # Provenance: if this file lives inside a git checkout, record the commit hash.
        repo_dir = str(Path(__file__).absolute().parent)
        is_git_repo = subprocess.call(["git", "-C", repo_dir, "branch"],
                                      stderr=subprocess.DEVNULL,
                                      stdout=subprocess.DEVNULL) == 0
        if is_git_repo:
            commit = subprocess.check_output(
                ["git", "-C", repo_dir, "describe", "--always"])
            cfg.force_update(key='commit_hash',
                             value=commit.strip().decode('ascii'))

        # The installed package version is always recorded.
        cfg.force_update(key="package_version", value=__version__)

        # Persist the effective config alongside the run outputs.
        cfg.dump_config(folder=self.log_dir)

        self.epoch = 0
        self.update = 0
        self._metrics = defaultdict(list)
        self.writer = None
Example #3
0
def create_config_files(base_config_path: Path, modify_dict: Dict[str, list],
                        output_dir: Path):
    """Create configs, given a base config and a dictionary of parameters to modify.

    This function will create one config file for each combination of parameters defined in the modify_dict.

    Parameters
    ----------
    base_config_path : Path
        Path to a base config file (.yml)
    modify_dict : dict
        Dictionary, mapping from parameter names to lists of possible parameter values.
    output_dir : Path
        Path to a folder where the generated configs will be stored
    """
    # exist_ok avoids the race between a separate is_dir() check and mkdir()
    output_dir.mkdir(parents=True, exist_ok=True)

    # load base config as dictionary
    base_config = Config(base_config_path)
    experiment_name = base_config.experiment_name
    option_names = list(modify_dict.keys())

    # iterate over each possible combination of hyper parameters
    for i, options in enumerate(itertools.product(*modify_dict.values())):
        # apply this combination and build a unique run name in a single pass
        name = experiment_name
        for key, val in zip(option_names, options):
            base_config.force_update(key=key, value=val)
            name += f"_{key}{val}"
        base_config.force_update(key="experiment_name", value=name)

        base_config.dump_config(output_dir, f"config_{i+1}.yml")

    print(f"Finished. Configs are stored in {output_dir}")