Example No. 1
def test_serialization():
    """Round-trip a finished local run through its dict, YAML and JSON forms."""
    run = run_local(tag_test(base_spec, "test_serialization"))
    verify_state(run)
    # Exercise each serialization path on the completed run object.
    pprint(run.to_dict())
    print(run.to_yaml())
    pprint(run.to_json())
Example No. 2
def test_run_local_handler():
    """Execute the my_func handler from the examples directory via run_local."""
    spec = tag_test(base_spec, "test_run_local_handler")
    spec.spec.handler = "my_func"
    handler_path = f"{examples_path}/handler.py"
    result = run_local(spec, command=handler_path, workdir=examples_path)
    verify_state(result)
Example No. 3
def test_run_local_nb():
    """Execute the 'training' handler straight from the example notebook."""
    spec = tag_test(base_spec, "test_run_local_nb")
    spec.spec.handler = "training"
    notebook = f"{examples_path}/mlrun_jobs.ipynb"
    result = run_local(spec, command=notebook, workdir=examples_path)
    verify_state(result)
Example No. 4
def test_run_local_handler():
    """Run the 'my_func' handler from examples/handler.py and verify state."""
    spec = tag_test(base_spec, 'test_run_local_handler')
    spec.spec.handler = 'my_func'
    script = '{}/handler.py'.format(examples_path)
    result = run_local(spec, command=script, workdir=examples_path)
    verify_state(result)
Example No. 5
def test_run_local_nb():
    """Run the 'training' handler from the example notebook via run_local.

    Bug fix: the test tag was 'test_run_local_handler', copied from a
    sibling test; it now matches this test's own name, consistent with
    the other test_run_local_nb variant in this file.
    """
    spec = tag_test(base_spec, 'test_run_local_nb')
    spec.spec.handler = 'training'
    result = run_local(spec,
                       command='{}/mlrun_jobs.ipynb'.format(examples_path),
                       workdir=examples_path)
    verify_state(result)
Example No. 6
def test_with_params():
    """Run my_func in-process and check its outputs and artifacts are recorded."""
    run = new_function().run(tag_test(base_spec, "test_with_params"),
                             handler=my_func)

    assert run.output("accuracy") == 16, "failed to run"
    assert run.status.artifacts[0].get("key") == "chart", "failed to run"
    assert run.artifact("chart").url, "failed to return artifact data item"
Example No. 7
def test_handler_hyperlist():
    """Drive my_func over a CSV param file: expect 3 child runs plus a header."""
    spec = tag_test(base_spec, "test_handler_hyperlist")
    spec.spec.param_file = f"{tests_root_directory}/param_file.csv"
    run = new_function().run(spec, handler=my_func)
    print(run)
    assert len(run.status.iterations) == 3 + 1, "hyper parameters test failed"
    verify_state(run)
Example No. 8
def test_handler_project():
    """Run my_func under an explicit project and owner label, check output."""
    spec = tag_test(base_spec, 'test_handler_project')
    spec.metadata.project = 'myproj'
    spec.metadata.labels = {'owner': 'yaronh'}
    run = new_function().run(spec, handler=my_func)
    print(run)
    assert run.output('accuracy') == 16, 'failed to run'
    verify_state(run)
Example No. 9
def test_handler_hyperlist():
    """Iterate my_func over the local CSV param file (3 rows plus header)."""
    spec = tag_test(base_spec, 'test_handler_hyperlist')
    spec.spec.param_file = '{}/param_file.csv'.format(here)
    run = new_function().run(spec, handler=my_func)
    print(run)
    expected = 3 + 1  # three parameter rows plus the header record
    assert len(run.status.iterations) == expected, 'hyper parameters test failed'
    verify_state(run)
Example No. 10
def test_handler_project():
    """Attach project and labels to the run spec, then verify my_func's output."""
    spec = tag_test(base_spec, "test_handler_project")
    spec.metadata.project = "myproj"
    spec.metadata.labels = {"owner": "yaronh"}
    run = new_function().run(spec, handler=my_func)
    print(run)
    assert run.output("accuracy") == 16, "failed to run"
    verify_state(run)
Example No. 11
def test_hyper_random():
    """Random search over a grid, capped at 5 evals: 5 children plus header."""
    search_space = {"p2": [2, 1, 3], "p3": [10, 20, 30]}
    spec = tag_test(base_spec, "test_hyper_random")
    spec.with_hyper_params(search_space, selector="r1", strategy="random")
    spec.spec.parameters["MAX_RANDOM_EVALS"] = 5
    run = new_function().run(spec, handler=hyper_func)

    verify_state(run)
    assert len(run.status.iterations) == 1 + 5, "wrong number of iterations"
Example No. 12
def test_run_local_obj():
    """Export the example notebook to a function object and run it locally.

    Bug fix: the test tag was 'test_run_local_handler', copied from a
    sibling test; it now matches this test's own name, consistent with
    the other test_run_local_obj variant in this file.
    """
    spec = tag_test(base_spec, 'test_run_local_obj')
    spec.spec.handler = 'training'
    nbpath = '{}/mlrun_jobs.ipynb'.format(examples_path)
    ymlpath = path.join(out_path, 'nbyaml.yaml')
    print('out path:', out_path, ymlpath)
    fn = code_to_function(filename=nbpath, kind='job').export(ymlpath)
    result = run_local(spec, command=fn, workdir=out_path)
    verify_state(result)
Example No. 13
def test_simple_function():
    """Serve a function over local HTTP and run a basic spec against it."""
    # Spawn the server in a background thread and give it time to come up.
    _thread.start_new_thread(create_function, (myfunction, 4444))
    time.sleep(2)

    spec = tag_test(base_spec, 'simple_function')
    result = new_function(command='http://localhost:4444').run(spec)
    print(result)
    verify_state(result)
Example No. 14
def test_run_local_obj():
    """Convert the example notebook to a job function, export it, run locally."""
    spec = tag_test(base_spec, "test_run_local_obj")
    spec.spec.handler = "training"
    notebook = f"{examples_path}/mlrun_jobs.ipynb"
    yaml_target = path.join(out_path, "nbyaml.yaml")
    print("out path:", out_path, yaml_target)
    func = code_to_function(filename=notebook, kind="job").export(yaml_target)
    result = run_local(spec, command=func, workdir=out_path)
    verify_state(result)
Example No. 15
def test_run_local_from_func():
    """Build a function object from the notebook and run it with local=True."""
    spec = tag_test(base_spec, "test_run_local_from_func")
    spec.spec.handler = "training"
    notebook = f"{examples_path}/mlrun_jobs.ipynb"
    yaml_target = path.join(out_path, "nbyaml.yaml")
    print("out path:", out_path, yaml_target)
    func = code_to_function(filename=notebook, kind="job").export(yaml_target)
    result = func.run(spec, workdir=out_path, local=True)
    verify_state(result)
Example No. 16
def test_hyper_custom():
    """Custom hyper-param loop: expect 4 child iterations and best run #3."""
    spec = tag_test(base_spec, "test_hyper_custom")
    run = new_function().run(spec, handler=custom_hyper_func)
    verify_state(run)
    assert len(run.status.iterations) == 1 + 4, "wrong number of iterations"

    # column 3 of each non-header iteration row holds the result values
    print([row[3] for row in run.status.iterations[1:]])
    assert run.output("best_iteration") == 3, "wrong best iteration"
Example No. 17
def test_handler_hyper():
    """Grid over p1 with a max.accuracy selector; iteration 2 should win."""
    spec = tag_test(base_spec, 'test_handler_hyper')
    spec.with_hyper_params({'p1': [1, 5, 3]}, selector='max.accuracy')
    run = new_function().run(spec, handler=my_func)
    print(run)
    assert len(run.status.iterations) == 3 + 1, 'hyper parameters test failed'
    assert run.status.results['best_iteration'] == 2, \
        'failed to select best iteration'
    verify_state(run)
Example No. 18
def test_hyper_function():
    """Run a hyper-param sweep against a locally served HTTP function."""
    # Start the server in a background thread and let it begin listening.
    _thread.start_new_thread(create_function, (myfunction, 4444))
    time.sleep(2)

    spec = tag_test(base_spec, 'hyper_function')
    spec.spec.hyperparams = {'p1': [1, 2, 3]}
    result = new_function(command='http://localhost:4444').run(spec)
    print(result)
    verify_state(result)
Example No. 19
def test_handler_hyper():
    """Sweep p1 with selector max.accuracy and check the chosen best run."""
    spec = tag_test(base_spec, "test_handler_hyper")
    spec.with_hyper_params({"p1": [1, 5, 3]}, selector="max.accuracy")
    run = new_function().run(spec, handler=my_func)
    print(run)
    assert len(run.status.iterations) == 3 + 1, "hyper parameters test failed"
    assert run.status.results["best_iteration"] == 2, \
        "failed to select best iteration"
    verify_state(run)
Example No. 20
def test_local_no_context():
    """Run a context-less script; params must surface as CLI arguments."""
    spec = tag_test(base_spec, 'test_local_no_context')
    spec.spec.parameters = {'xyz': '789'}
    runner = new_function(command='{}/no_ctx.py'.format(here), mode='noctx')
    result = runner.run(spec)
    verify_state(result)

    db = get_run_db().connect()
    state, log = db.get_log(result.metadata.uid)
    log = str(log)
    print(state)
    print(log)
    # in noctx mode the parameter is passed as a command-line argument
    assert log.find(", '--xyz', '789']") != -1, 'params not detected in noctx'
Example No. 21
def test_local_no_context():
    """Run a context-less script and check params appear as CLI args in the log."""
    spec = tag_test(base_spec, "test_local_no_context")
    spec.spec.parameters = {"xyz": "789"}
    runner = new_function(command="{}/no_ctx.py".format(tests_root_directory),
                          mode="noctx")
    result = runner.run(spec)
    verify_state(result)

    db = get_run_db()
    state, log = db.get_log(result.metadata.uid)
    log = str(log)
    print(state)
    print(log)
    # in noctx mode the parameter is passed as a command-line argument
    assert log.find(", '--xyz', '789']") != -1, "params not detected in noctx"
Example No. 22
def test_hyper_grid_parallel():
    """Grid search with two parallel runs: still 6 child runs plus a header."""
    grid_params = '{"p2": [2,1,3], "p3": [10,20]}'
    mlrun.datastore.set_in_memory_item("params.json", grid_params)

    spec = tag_test(base_spec, "test_hyper_grid")
    spec.with_param_file(
        "memory://params.json", selector="r1", strategy="grid", parallel_runs=2
    )
    run = new_function().run(spec, handler=hyper_func)

    verify_state(run)
    # 3 x p2, 2 x p3 = 6 iterations + 1 header line
    assert len(run.status.iterations) == 1 + 2 * 3, "wrong number of iterations"
Example No. 23
def test_hyper_list():
    """List strategy zips parameters row-wise: 3 child runs, best is #2."""
    list_params = '{"p2": [2,3,1], "p3": [10,30,20]}'
    mlrun.datastore.set_in_memory_item("params.json", list_params)

    spec = tag_test(base_spec, "test_hyper_list")
    spec.with_param_file("memory://params.json", selector="r1", strategy="list")
    run = new_function().run(spec, handler=hyper_func)

    verify_state(run)
    assert len(run.status.iterations) == 1 + 3, "wrong number of iterations"

    # column 5 of each non-header iteration row carries the r1 result
    r1_values = [row[5] for row in run.status.iterations[1:]]
    assert r1_values == [20, 90, 20], "unexpected results"
    assert run.output("best_iteration") == 2, "wrong best iteration"
Example No. 24
def test_hyper_grid():
    """Full grid over p2 x p3 from an in-memory param file: 6 child runs."""
    grid_params = '{"p2": [2,1,3], "p3": [10,20]}'
    mlrun.datastore.set_in_memory_item("params.json", grid_params)

    spec = tag_test(base_spec, "test_hyper_grid")
    spec.with_param_file("memory://params.json", selector="r1", strategy="grid")
    run = new_function().run(spec, handler=hyper_func)

    verify_state(run)
    # 3 x p2, 2 x p3 = 6 iterations + 1 header line
    assert len(run.status.iterations) == 1 + 2 * 3, "wrong number of iterations"

    # column 5 of each non-header iteration row carries the r1 result
    r1_values = [row[5] for row in run.status.iterations[1:]]
    assert r1_values == [20, 10, 30, 40, 20, 60], "unexpected results"
    assert run.output("best_iteration") == 6, "wrong best iteration"
Example No. 25
def test_hyper_list_with_stop():
    """List strategy with a stop condition halts after the third child run."""
    list_params = '{"p2": [2,3,7,4,5], "p3": [10,10,10,10,10]}'
    mlrun.datastore.set_in_memory_item("params.json", list_params)

    spec = tag_test(base_spec, "test_hyper_list_with_stop")
    spec.with_param_file(
        "memory://params.json",
        selector="max.r1",
        strategy="list",
        stop_condition="r1>=70",
    )
    run = new_function().run(spec, handler=hyper_func)

    verify_state(run)
    # result: r1 = p2 * p3, r1 >= 70 lead to stop on third run
    assert len(run.status.iterations) == 1 + 3, "wrong number of iterations"
    assert run.output("best_iteration") == 3, "wrong best iteration"
Example No. 26
def test_run_local_with_uid_does_not_exist(monkeypatch):
    """
    Mocking a scenario that happened in field in which getuser raised the same error as the mock
    The problem was basically that the code was
    environ.get("V3IO_USERNAME", getpass.getuser())
    instead of
    environ.get("V3IO_USERNAME") or getpass.getuser()
    """
    def raise_uid_not_found(*_args, **_kwargs):
        raise KeyError("getpwuid(): uid not found: 400")

    environ["V3IO_USERNAME"] = "******"
    monkeypatch.setattr(getpass, "getuser", raise_uid_not_found)
    spec = tag_test(base_spec, "test_run_local")
    result = run_local(
        spec, command=f"{examples_path}/training.py", workdir=examples_path
    )
    verify_state(result)
Example No. 27
def test_run_local():
    """Smoke-test run_local on the example training script."""
    spec = tag_test(base_spec, "test_run_local")
    script = f"{examples_path}/training.py"
    result = run_local(spec, command=script, workdir=examples_path)
    verify_state(result)
Example No. 28
def test_dask_local():
    """Run my_func on a local dask runtime with inline task parameters."""
    task = NewTask(params={'p1': 3, 'p2': 'vv'})
    spec = tag_test(task, 'test_dask_local')
    run = new_function(kind='dask').run(spec, handler=my_func)
    verify_state(run)
Example No. 29
def test_local_handler():
    """Run the my_func handler from examples/handler.py via new_function."""
    # NOTE(review): the tag "test_local_runtime" does not match the test
    # name — presumably intentional (shared tag), verify against the suite.
    spec = tag_test(base_spec, "test_local_runtime")
    fn = new_function(command="{}/handler.py".format(examples_path))
    result = fn.run(spec, handler="my_func")
    verify_state(result)
Example No. 30
def test_run_local():
    """Run the example training script locally and verify its final state."""
    spec = tag_test(base_spec, 'test_run_local')
    script = '{}/training.py'.format(examples_path)
    result = run_local(spec, command=script, workdir=examples_path)
    verify_state(result)