def test_core_config_load_dump(path, serializer, tmpdir):
    """Test Config.load."""
    config = {"x": 1}
    path = str(tmpdir.join(path))

    if serializer is None:
        # Unsupported file extension
        with pytest.raises(ValueError):
            fromconfig.dump(config, path)

        with pytest.raises(ValueError):
            fromconfig.load(path)

    else:
        # Dump config to file
        with Path(path).open("w") as file:
            serializer.dump(config, file)

        # Read content of the dump
        with Path(path).open() as file:
            content = file.read()

        # Reload
        reloaded = fromconfig.load(path)
        assert reloaded == config

        # Dump with config method and check content is the same as before
        fromconfig.dump(reloaded, path)
        with Path(path).open() as file:
            assert file.read() == content


def test_core_config_no_jsonnet(tmpdir, monkeypatch):
    """Test handling of a missing jsonnet dependency."""
    monkeypatch.setattr(fromconfig.core.config, "_jsonnet", None)

    # Dumping works even if _jsonnet is missing
    config = {"x": 2}
    fromconfig.dump(config, str(tmpdir.join("config.jsonnet")))
    fromconfig.dump(config, str(tmpdir.join("config.json")))
    fromconfig.dump(config, str(tmpdir.join("config.yaml")))
    fromconfig.dump(config, str(tmpdir.join("config.yml")))

    # Loading non-jsonnet files also works
    assert fromconfig.load(str(tmpdir.join("config.json"))) == config
    assert fromconfig.load(str(tmpdir.join("config.yaml"))) == config
    assert fromconfig.load(str(tmpdir.join("config.yml"))) == config

    # Raise import error if reloading from jsonnet
    with pytest.raises(ImportError):
        fromconfig.load(str(tmpdir.join("config.jsonnet")))
Example #3
def main(
    *paths: str,
    use_mlflow: bool = False,
    run_name: str = None,
    run_id: str = None,
    tracking_uri: str = None,
    experiment_name: str = None,
    artifact_location: str = None,
):
    """Command line with MlFlow support."""
    if not paths:
        return main

    # Load configs and merge them
    configs = [fromconfig.load(path) for path in paths]
    config = functools.reduce(fromconfig.utils.merge_dict, configs)

    # Parse merged config
    parser = fromconfig.parser.DefaultParser()
    parsed = parser(config)

    if use_mlflow:  # Create run, log configs and parameters
        # Configure MlFlow
        if tracking_uri is not None:
            mlflow.set_tracking_uri(tracking_uri)
        if experiment_name is not None:
            if mlflow.get_experiment_by_name(experiment_name) is None:
                mlflow.create_experiment(name=experiment_name,
                                         artifact_location=artifact_location)
            mlflow.set_experiment(experiment_name)

        # Start run (cannot use context because of python Fire)
        run = mlflow.start_run(run_id=run_id, run_name=run_name)

        # Log run information
        url = f"{mlflow.get_tracking_uri()}/#/experiments/{run.info.experiment_id}/runs/{run.info.run_id}"
        LOGGER.info(f"MlFlow Run Initialized: {url}")

        # Save merged and parsed config to MlFlow
        dir_artifacts = tempfile.mkdtemp()
        with Path(dir_artifacts, "config.json").open("w") as file:
            json.dump(config, file, indent=4)
        with Path(dir_artifacts, "parsed.json").open("w") as file:
            json.dump(parsed, file, indent=4)
        mlflow.log_artifacts(local_dir=dir_artifacts)

        # Log flattened parameters
        for key, value in fromconfig.utils.flatten(parsed):
            mlflow.log_param(key=key, value=value)

    return fromconfig.fromconfig(parsed)
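
The comment about Python Fire suggests main is exposed through a fire CLI. A minimal sketch of how the entry point might be wired (the fire import, module name and example flags are assumptions, not part of the excerpt):

# Hypothetical CLI entry point (assumed): fire turns main's keyword
# arguments into command-line flags, e.g.
#   python cli.py config.yaml params.yaml --use_mlflow --experiment_name=demo
import fire

if __name__ == "__main__":
    fire.Fire(main)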
Example #4
def run(*paths: str):
    """Load config, parse and instantiate.

    Parameters
    ----------
    *paths : str
        Paths to config files.
    """
    # If no paths, return run to get fire help
    if not paths:
        return run

    # Load configs and merge them
    configs = [fromconfig.load(path) for path in paths]
    config = functools.reduce(fromconfig.utils.merge_dict, configs)

    # Parse merged config
    parser = fromconfig.parser.DefaultParser()
    parsed = parser(config)

    # Instantiate and return
    return fromconfig.fromconfig(parsed)
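
Like the previous example, run is presumably exposed through Fire, but it can also be called directly from Python. An illustrative call with hypothetical config paths:

# Hypothetical direct invocation (paths are examples only): the merged,
# parsed config is instantiated and the resulting object returned.
instance = run("config.yaml", "params.yaml")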
Example #5
"""Manual Hyper Parameter search example."""

import fromconfig

if __name__ == "__main__":
    config = fromconfig.load("config.yaml")
    parser = fromconfig.parser.DefaultParser()
    for learning_rate in [0.01, 0.1]:
        params = {"learning_rate": learning_rate}
        parsed = parser({**config, "hparams": params})
        model = fromconfig.fromconfig(parsed["model"])
        model.train()
        # Clear the singletons, if any, as we most likely don't
        # want to share them between configs
        fromconfig.parser.singleton.clear()
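
The config.yaml used by this search is not reproduced here. One plausible shape, written as the equivalent Python dict and following fromconfig's _attr_ / "@reference" conventions (an assumption, not the actual file):

# Hypothetical content of config.yaml for the search above, as a Python dict.
# "@hparams.learning_rate" is a fromconfig reference resolved by the
# DefaultParser against the "hparams" key injected in the loop.
config = {
    "model": {
        "_attr_": "model.Model",
        "learning_rate": "@hparams.learning_rate",
    }
}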
Example #6
def convert(path_input, path_output):
    """Convert input into output with load and dump."""
    fromconfig.dump(fromconfig.load(path_input), path_output)
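
A helper like this can be used to switch serialization formats. An illustrative call (file names are examples only):

# Hypothetical usage: rewrite a YAML config as JSON.
convert("config.yaml", "config.json")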
Example #7
"""Manual quickstart."""

import functools

import fromconfig

import model

if __name__ == "__main__":
    # Load configs from yaml and merge into one dictionary
    paths = ["config.yaml", "params.yaml"]
    configs = [fromconfig.load(path) for path in paths]
    config = functools.reduce(fromconfig.utils.merge_dict, configs)

    # Parse the config (resolve interpolation)
    parser = fromconfig.parser.DefaultParser()
    parsed = parser(config)

    # Instantiate one of the keys
    instance = fromconfig.fromconfig(parsed["model"])
    assert isinstance(instance, model.Model)
    instance.train()

    # You can also use the DefaultLauncher that replicates the CLI
    launcher = fromconfig.launcher.DefaultLauncher()
    launcher(config, "model - train")