def test_experiment_config():
    """The config interface resolves component arguments and version flags."""
    project = Project("./test_project")
    config = project.parse_config()

    experiment = ml.Experiment().components(
        ("nodes.observations", {"attr": "node"}),
        ("workers.interactive", {"id": 2}),
    )
    node, components, resources = list(parse_experiment(experiment))[0]
    interface = ConfigInterface(config, experiment.specification["version"])
    # node arguments come from the component tuple, worker arguments from defaults
    assert interface.get(node)["args"]["attr"] == "node"
    assert interface.get(components[0])["args"]["attr"] == "worker"

    # a version flag is applied to node and worker components alike
    experiment = (
        ml.Experiment()
        .components(
            ("nodes.observations", {"attr": "node"}),
            ("workers.interactive", {"id": 2}),
        )
        .version("~test")
    )
    node, components, resources = list(parse_experiment(experiment))[0]
    interface = ConfigInterface(config, experiment.specification["version"])
    assert interface.get(node)["args"]["version"] == 0
    assert interface.get(components[0])["args"]["version"] == 1
def test_computable_resources():
    """Callable resource specifications are evaluated per engine."""
    project = Project("./test_project")

    experiment = ml.Experiment().component(
        "thenode",
        resources=lambda engine, component, components: {
            "test": component.config["alpha"]
        },
    )
    # only compute resources for engines that support a resource specification
    execution = ml.Execution(
        experiment, project=project, engine="native"
    ).set_schedule()
    assert execution.schedule._elements[0][3] is None
    execution = execution.set_engine("slurm").set_schedule()
    assert execution.schedule._elements[0][3]["--test"] == 0

    # default resources
    experiment = ml.Experiment().component("nodes.observations")
    execution = ml.Execution(
        experiment, project=project, engine="slurm"
    ).set_schedule()
    assert execution.schedule._elements[0][3]["used_engine"] == "Slurm"
def test_hidden_mixins():
    """Mixins defined only in an imported project still resolve."""
    sys.path.insert(0, os.path.join(os.getcwd(), "test_project"))

    # hidden mixins that are only part of the imported project but not
    # referenced in the project that imports them
    execution = ml.execute(
        ml.Experiment().components("inherited_mixin"), project="./test_project"
    )
    assert execution.failures == 0

    execution = ml.execute(
        ml.Experiment().components("direct_mixin_inheritance"),
        project="./test_project",
    )
    assert execution.failures == 0
def test_ray_engine_tune():
    """A tuning experiment runs on the ray engine."""
    import ray
    from ray import tune

    ray.init(ignore_reinit_error=True)
    search_space = {"lr": tune.grid_search([0.001, 0.01])}
    ml.execute(
        ml.Experiment()
        .components("tunemodel")
        .tune(stop={"acc": 0.5}, config=search_space),
        engine="ray",
        storage="./_test_data/tune",
        project="./test_project",
    )
def test_exception_handling():
    """Component failures surface as ExecutionException without crashing others."""
    sys.path.insert(0, os.path.join(os.getcwd(), "test_project"))
    from test_project.failure.exceptions import ExceptionsComponent

    # dispatch of a failing component returns the exception object
    status = ExceptionsComponent().dispatch([], {"components": "12345"})
    assert isinstance(status, ml.core.exceptions.ExecutionException)

    ml.execute(
        ml.Experiment().components("failure.exceptions"), project="./test_project"
    )

    # a failure does not crash others
    import ray

    ray.init(ignore_reinit_error=True)
    ml.execute(
        ml.Experiment()
        .components("failure.exceptions")
        .components("thenode")
        .repeat(2),
        project="./test_project",
    )
def test_experiment_serialization():
    """An experiment round-trips through its JSON representation."""
    experiment = (
        ml.Experiment()
        .components(
            "test",
            [
                ("test", [{"a": i} for i in range(3)]),
                ("test", [{"a": i} for i in range(3)]),
            ],
        )
        .repeat(2)
    )
    restored = ml.Experiment.from_json(experiment.to_json())
    # compare via string form since specifications may not define equality
    assert str(experiment.specification) == str(restored.specification)
def pytest_sessionstart(session):
    """Generate the shared storage fixture data before the test session starts.

    Skipped entirely when DISABLE_STORAGE_GENERATION is set in the environment.
    """
    if "DISABLE_STORAGE_GENERATION" in os.environ:
        return

    # setup storage test data
    path = Helpers.tmp_directory("storage")

    def run(experiment, storage, **kwargs):
        # execute against the test project and report whether all runs succeeded
        result = ml.execute(experiment, storage, project="./test_project", **kwargs)
        return result.failures == 0

    assert run(
        ml.Experiment().components(("nodes.observations", {"id": 1})).repeat(3),
        path,
    )
    assert run(
        ml.Experiment()
        .components(("nodes.observations", {"id": 2}), "thechildren")
        .repeat(2),
        path,
    )
    assert run(
        ml.Experiment()
        .components(("nodes.observations", {"id": 3, "test": True}))
        .repeat(4),
        path,
        seed="tttttt",
    )

    # sub-experiments
    assert run(
        ml.Experiment().component("nodes.observations"),
        {"url": os.path.join(path, "tttttt"), "directory": "subexperiment"},
        seed="SUBEXP",
    )
    assert run(
        ml.Experiment().component("nodes.observations"),
        {"url": os.path.join(path, "tttttt"), "directory": "sub/test"},
    )
    assert run(
        ml.Experiment().components(("nodes.observations", {"id": 4})),
        os.path.join(path, "subdirectory"),
        seed="TTTTTT",
    )
    assert run(
        ml.Experiment().components(("nodes.observations", {"id": 4, "corrupt": True})),
        path,
        seed="corupt",
    )

    # corrupt some data
    shutil.rmtree(os.path.join(path, "corupt"), ignore_errors=True)
def test_default_component():
    """A component that relies on the default module executes successfully."""
    project = Project("./test_project")
    ml.execute(ml.Experiment().component("uses_default_module"), project=project)
def test_project_export(helpers):
    """An execution can be exported to a target directory."""
    path = helpers.tmp_directory("export")
    execution = ml.Execution(
        experiment=ml.Experiment().components("nodes.observations", "export_model"),
        project="./test_project",
    )
    execution.export(path=path + "/test")
def test_interaction():
    """A node and an interactive worker component execute together."""
    experiment = ml.Experiment().components("thenode", "workers.interactive")
    ml.execute(experiment, project="./test_project")
def test_native_engine_multiprocessing():
    """The native engine handles repeats with a single worker process."""
    experiment = ml.Experiment().components("thenode", "thechildren").repeat(5)
    ml.execute(experiment, engine="native:1", project="./test_project")

    # failure
    experiment = ml.Experiment().components("failure.exceptions").repeat(5)
    ml.execute(experiment, engine="native:1", project="./test_project")
def test_native_engine():
    """Execution with the default (None) engine runs natively."""
    ml.execute(
        ml.Experiment().components("thenode", "thechildren").repeat(2),
        engine=None,
        project="./test_project",
    )
def test_experiment_parser():
    """Parsing expands repeats, splits and version lists into specifications."""

    def expand(experiment):
        # materialise the parsed specification list for an experiment
        return list(parse_experiment(experiment))

    # repeat behaviour
    assert len(expand(ml.Experiment().components("test"))) == 1
    assert len(expand(ml.Experiment().components("test").repeat(1))) == 1

    spec = expand(ml.Experiment().components("test").repeat(5))
    assert len(spec) == 5
    seeding_test(spec)

    spec = expand(
        ml.Experiment().components(("test", "~v"), ("test", "~v")).repeat(3)
    )
    assert len(spec) == 3
    seeding_test(spec)

    # split multiplies the repeat count
    spec = expand(ml.Experiment().components("test").repeat(3).split(2))
    assert len(spec) == 6
    seeding_test(spec)

    # a list of version dicts expands into one specification per entry
    spec = expand(ml.Experiment().components(("test", [{"a": i} for i in range(3)])))
    assert len(spec) == 3
    seeding_test(spec)

    spec = expand(
        ml.Experiment().components(("test", [{"a": i} for i in range(3)])).repeat(3)
    )
    assert len(spec) == 9
    seeding_test(spec)

    spec = expand(
        ml.Experiment().components("test", ("test", [{"a": i} for i in range(3)]))
    )
    assert len(spec) == 3
    seeding_test(spec)

    spec = expand(
        ml.Experiment().components(
            "test",
            [
                ("test", [{"a": i} for i in range(3)]),
                ("test", [{"a": i} for i in range(3)]),
            ],
        )
    )
    assert len(spec) == 9
    seeding_test(spec)

    spec = expand(
        ml.Experiment().components("test", ("test", [{"sub": lr} for lr in range(5)]))
    )
    assert len(spec) == 5
    seeding_test(spec)